hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f71eb7670bc86d87c7539abbd12daee35e23c879 | 6,054 | py | Python | object_detection/test_mobilenet.py | OSSDC/OSSDC-VisionBasedACC | a9004c888e91b8becaebc22524f698ebb3c9746e | [
"Apache-2.0"
] | 112 | 2017-04-14T15:34:32.000Z | 2021-05-02T21:14:09.000Z | object_detection/test_mobilenet.py | AndreSlavescu/OSSDC-VisionBasedACC | a9004c888e91b8becaebc22524f698ebb3c9746e | [
"Apache-2.0"
] | 20 | 2017-04-11T12:24:03.000Z | 2019-11-14T00:49:46.000Z | object_detection/test_mobilenet.py | AndreSlavescu/OSSDC-VisionBasedACC | a9004c888e91b8becaebc22524f698ebb3c9746e | [
"Apache-2.0"
] | 46 | 2017-07-29T04:57:17.000Z | 2022-01-25T03:57:56.000Z | import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
import cv2
from webcamvideostream import *
# Video source selection: an integer selects a local camera index, a string
# selects a prerecorded video file (the second assignment below wins).
videoUrl = 1
videoUrl = "/sharefolder/sdc/sdc-data/ossdc-simulator-TheCrew-PS4-30fps.mp4"
# Set to True to read frames from a live webcam stream instead of the file.
webcam = False
#webcam = True
# Screen-capture source; kept None here so the main loop skips that branch.
sct = None
ret = True
if webcam:
    # Threaded webcam reader at 1280x720, 30 fps.
    cap = WebcamVideoStream(videoUrl,(1280,720),30)
    cap.start()
else:
    cap = cv2.VideoCapture(videoUrl)
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# ## Object detection imports
# Here are the imports from the object detection module.
# In[3]:
from utils import label_map_util
from utils import visualization_utils as vis_util
# # Model preparation
# ## Variables
#
# Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing `PATH_TO_CKPT` to point to a new .pb file.
#
# By default we use an "SSD with Mobilenet" model here. See the [detection model zoo](https://github.com/tensorflow/models/blob/master/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.
# In[4]:
# What model to download (SSD + MobileNet trained on MS-COCO).
MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
# Number of classes in the MS-COCO label map.
NUM_CLASSES = 90
# ## Download Model
# In[5]:
'''
opener = urllib.request.URLopener()
opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
tar_file = tarfile.open(MODEL_FILE)
for file in tar_file.getmembers():
file_name = os.path.basename(file.name)
if 'frozen_inference_graph.pb' in file_name:
tar_file.extract(file, os.getcwd())
'''
# ## Load a (frozen) Tensorflow model into memory.
# In[6]:
# Load the frozen TensorFlow graph into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
# Load the label map: maps the class indices predicted by the network to
# human-readable category names (e.g. 5 -> 'airplane').
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# ## Helper code
# In[8]:
def load_image_into_numpy_array(image):
    """Convert a PIL-style image into an (H, W, 3) uint8 numpy array."""
    width, height = image.size
    pixels = np.array(image.getdata())
    return pixels.reshape((height, width, 3)).astype(np.uint8)
# # Detection
# In[9]:
# For the sake of simplicity we will use only 2 images:
# image1.jpg
# image2.jpg
# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.
PATH_TO_TEST_IMAGES_DIR = 'test_images'
TEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(1, 3) ]
# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
# In[10]:
from datetime import datetime
precision = 10
def getCurrentClock():
    """Return the current timestamp (a datetime) used for FPS timing."""
    # datetime.now() gives wall-clock deltas via total_seconds().
    return datetime.now()
frameCnt=0
prevFrameCnt=0
prevTime=getCurrentClock()
# Main capture + detection + display loop.  Runs until 'q' is pressed in the
# OpenCV window.  Prints an FPS figure roughly once per second.
with detection_graph.as_default():
    with tf.Session(graph=detection_graph) as sess:
        while True:
            # Grab a frame from whichever source is configured.
            if sct is not None or webcam or cap.grab():
                if sct is not None:
                    # Screen-capture source.  BUG FIX: was 'numpy.asarray',
                    # but only 'np' is imported ('import numpy as np'), so
                    # this branch raised NameError whenever enabled.
                    # NOTE(review): 'mon' must be defined by whoever enables
                    # the screen-capture source -- it is not set in this file.
                    frame = np.asarray(sct.grab(mon))
                else:
                    if webcam:
                        frame = cap.read()
                    else:
                        flag, frame = cap.retrieve()
                        if not flag:
                            continue
                image_np = frame
                # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
                image_np_expanded = np.expand_dims(image_np, axis=0)
                image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
                # Each box represents a part of the image where a particular object was detected.
                boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
                # Each score represents the confidence for the corresponding object.
                # Scores are shown on the result image, together with the class label.
                scores = detection_graph.get_tensor_by_name('detection_scores:0')
                classes = detection_graph.get_tensor_by_name('detection_classes:0')
                num_detections = detection_graph.get_tensor_by_name('num_detections:0')
                # Actual detection.
                (boxes, scores, classes, num_detections) = sess.run(
                    [boxes, scores, classes, num_detections],
                    feed_dict={image_tensor: image_np_expanded})
                # Visualization of the results of a detection.
                vis_util.visualize_boxes_and_labels_on_image_array(
                    image_np,
                    np.squeeze(boxes),
                    np.squeeze(classes).astype(np.int32),
                    np.squeeze(scores),
                    category_index,
                    use_normalized_coordinates=True,
                    line_thickness=8)
                frameCnt = frameCnt + 1
                nowMicro = getCurrentClock()
                delta = (nowMicro - prevTime).total_seconds()
                if delta >= 1.0:
                    fpsValue = (frameCnt - prevFrameCnt) / delta
                    print("FPS = %3.2f, Frame = %6d" % (fpsValue, frameCnt))
                    prevFrameCnt = frameCnt
                    # BUG FIX: reset the interval start so FPS measures the
                    # last second instead of a cumulative average since start
                    # (prevFrameCnt was reset but prevTime never was).
                    prevTime = nowMicro
                cv2.imshow('object detection', cv2.resize(image_np, (800, 600)))
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    cv2.destroyAllWindows()
                    break
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
import cv2
from webcamvideostream import *
videoUrl = 1
videoUrl = "/sharefolder/sdc/sdc-data/ossdc-simulator-TheCrew-PS4-30fps.mp4"
webcam = False
sct = None
ret = True
if webcam:
cap = WebcamVideoStream(videoUrl,(1280,720),30)
cap.start()
else:
cap = cv2.VideoCapture(videoUrl)
sys.path.append("..")
ort visualization_utils as vis_util
oco_11_06_2017'
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
NUM_CLASSES = 90
H_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
belmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
py_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
H_TO_TEST_IMAGES_DIR = 'test_images'
TEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(1, 3) ]
IMAGE_SIZE = (12, 8)
from datetime import datetime
precision = 10
def getCurrentClock():
return datetime.now()
frameCnt=0
prevFrameCnt=0
prevTime=getCurrentClock()
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
while True:
if sct is not None or webcam or cap.grab():
if sct is not None:
frame = numpy.asarray(sct.grab(mon))
else:
if webcam:
frame = cap.read()
else:
flag, frame = cap.retrieve()
if not flag:
continue
image_np = frame
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
scores = detection_graph.get_tensor_by_name('detection_scores:0')
classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8)
frameCnt=frameCnt+1
nowMicro = getCurrentClock()
delta = (nowMicro-prevTime).total_seconds()
if delta>=1.0:
fpsValue = ((frameCnt-prevFrameCnt)/delta)
print("FPS = %3.2f, Frame = %6d" % (fpsValue, frameCnt))
prevFrameCnt=frameCnt
cv2.imshow('object detection', cv2.resize(image_np, (800,600)))
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break | true | true |
f71eb8bdfedae5608421ec5e72f402a3efef2d6a | 10,473 | py | Python | bindings/cython/utils/gencython.py | windelbouwman/datoviz | 2a8c54c06185d9ebaa2d5323b9d064b58b60133d | [
"MIT"
] | null | null | null | bindings/cython/utils/gencython.py | windelbouwman/datoviz | 2a8c54c06185d9ebaa2d5323b9d064b58b60133d | [
"MIT"
] | null | null | null | bindings/cython/utils/gencython.py | windelbouwman/datoviz | 2a8c54c06185d9ebaa2d5323b9d064b58b60133d | [
"MIT"
] | null | null | null | from functools import lru_cache
from pathlib import Path
import os
from pprint import pprint
import re
import sys
from textwrap import indent
import pyparsing as pp
from pyparsing import (
Suppress, Word, alphas, alphanums, nums, Optional, Group, ZeroOrMore, empty, restOfLine,
Keyword, cStyleComment, Empty, Literal
)
HEADER_DIR = (Path(__file__).parent / '../../../include/datoviz').resolve()
INTERNAL_HEADER_DIR = (Path(__file__).parent / '../../../src').resolve()
EXTERNAL_HEADER_DIR = HEADER_DIR / '../../external'
CYTHON_OUTPUT = (Path(__file__).parent / '../datoviz/cydatoviz.pxd').resolve()
HEADER_FILES = (
'app.h', 'vklite.h', 'context.h', 'canvas.h', 'keycode.h', 'transforms.h', 'colormaps.h',
'array.h', 'mesh.h', 'controls.h', 'graphics.h', 'builtin_visuals.h', 'panel.h',
'visuals.h', 'scene.h', 'transfers.h')
STRUCTS = (
'DvzEvent',
'DvzEventUnion',
'DvzFrameEvent',
'DvzKeyEvent',
'DvzMouseButtonEvent',
'DvzMouseClickEvent',
'DvzMouseDragEvent',
'DvzMouseMoveEvent',
'DvzMouseWheelEvent',
'DvzRefillEvent',
'DvzGuiEvent',
'DvzResizeEvent',
'DvzScreencastEvent',
'DvzSubmitEvent',
'DvzTimerEvent',
'DvzViewport',
)
ENUM_START = '# ENUM START'
ENUM_END = '# ENUM END'
STRUCT_START = '# STRUCT START'
STRUCT_END = '# STRUCT END'
UNION_START = '# UNION START'
UNION_END = '# UNION END'
FUNCTION_START = '# FUNCTION START'
FUNCTION_END = '# FUNCTION END'
# File explorer and manipulation
# -------------------------------------------------------------------------------------------------
def iter_header_files():
    """Yield every .h file from the public and internal header directories.

    Files are yielded in sorted order per directory, public headers first.
    """
    for directory in (HEADER_DIR, INTERNAL_HEADER_DIR):
        for header in sorted(directory.glob('*.h')):
            yield header
def read_file(filename):
    """Read *filename* (a Path) and return its text with // comments stripped."""
    return _remove_comments(filename.read_text())
def insert_into_file(filename, start, end, insert):
    """Replace the region between the *start* and *end* markers in *filename*
    with *insert*, indented one level (4 spaces).
    """
    text = filename.read_text()
    start_pos = text.index(start)
    end_pos = text.index(end)
    # Keep everything through the start marker plus its trailing newline...
    head = text[:start_pos + len(start) + 1]
    # ...and everything from 5 characters before the end marker onwards
    # (the end marker's own prefix on its line).
    tail = text[end_pos - 5:]
    filename.write_text(head + indent(insert, '    ') + tail)
def _remove_comments(text):
return '\n'.join([l.split('//')[0] for l in text.splitlines()])
# C header parsing
# -------------------------------------------------------------------------------------------------
def parse_defines(text):
    """Extract C*-prefixed '#define' constants from a C header.

    Resolves, in order: literal integers, aliases of other defines, and
    single sums of the form 'A + B'.  Parentheses around values are dropped.
    Returns a dict mapping define name -> int (or unresolved string).
    """
    pairs = re.findall(
        r"#define (C[A-Z\_0-9]+)\s+([^\n]+)", text, re.MULTILINE)
    defines = {name: value.replace('(', '').replace(')', '')
               for name, value in pairs}
    # Pass 1: literal integers.
    for name, value in defines.items():
        if value.isdigit():
            defines[name] = int(value)
    # Pass 2: aliases of already-resolved defines.
    for name, value in defines.items():
        if isinstance(value, str) and '+' not in value:
            defines[name] = defines[value]
    # Pass 3: sums of two resolved defines ("A + B").
    for name, value in defines.items():
        if isinstance(value, str) and '+' in value:
            parts = value.split(' + ')
            defines[name] = defines[parts[0]] + defines[parts[1]]
    return defines
# _STRUCT_NAMES = ('DvzPrivateEvent', 'DvzEvent')
def _parse_enum(text):
    """Parse C 'typedef enum { ... } Name;' declarations from *text*.

    Returns a dict mapping enum type name -> list of (member_name, value)
    pairs.  Explicit numeric values are converted to int, missing values
    default to the member's ordinal position, and C 'false'/'true' become
    the strings 'False'/'True'.
    """
    enums = {}
    # syntax we don't want to see in the final parse tree
    LBRACE, RBRACE, EQ, COMMA, SEMICOLON = map(Suppress, "{}=,;")
    _enum = Suppress("typedef enum")
    identifier = Word(alphanums + "_+-")
    # One enum member: NAME [= VALUE] [,] [/* comment */]
    enumValue = Group(
        identifier("name") +
        Optional(EQ + identifier("value")) +
        Optional(COMMA) +
        Optional(Suppress(cStyleComment)))
    enumList = Group(enumValue + ZeroOrMore(enumValue))
    enum = _enum + LBRACE + \
        enumList("names") + RBRACE + identifier("enum") + SEMICOLON
    for item, start, stop in enum.scanString(text):
        l = []
        for i, entry in enumerate(item.names):
            if entry.value.isdigit():
                entry.value = int(entry.value)
            elif not entry.value:
                # No explicit value: use the member's position in the enum.
                entry.value = i
            elif entry.value in ('false', 'true'):
                # C bool literals -> Python-style capitalized names.
                entry.value = entry.value.capitalize()
            l.append((entry.name, entry.value))
        enums[item.enum] = l
    return enums
def _gen_enum(enums):
out = ''
for name, l in enums.items():
out += f'ctypedef enum {name}:\n'
for identifier, value in l:
out += f' {identifier} = {value}\n'
out += '\n'
return out
def _parse_struct(text):
    """Parse C 'struct Name { ... };' / 'union Name { ... };' declarations.

    Returns a dict mapping struct name -> (kind, fields), where kind is
    'struct' or 'union' and fields is a list of (const, dtype, name)
    triples (const is the matched 'const' keyword or an empty result).
    """
    structs = {}
    # syntax we don't want to see in the final parse tree
    LBRACE, RBRACE, COMMA, SEMICOLON = map(Suppress, "{},;")
    _struct = Literal("struct") ^ Literal("union")
    const = Keyword("const")
    dtype = Word(alphanums + "_*")
    identifier = Word(alphanums + "_[]")
    # One field: [const] TYPE NAME;
    structDecl = Group(Optional(const("const")) +
                       dtype("dtype") + identifier("name") + SEMICOLON)
    structList = Group(structDecl + ZeroOrMore(structDecl))
    struct = _struct('struct') + identifier("struct_name") + LBRACE + \
        structList("names") + RBRACE + SEMICOLON
    for item, start, stop in struct.scanString(text):
        l = []
        for i, entry in enumerate(item.names):
            l.append((entry.const, entry.dtype, entry.name))
        structs[item.struct_name] = (item.struct, l)
    return structs
def _gen_struct(structs):
    """Render whitelisted structs/unions as Cython ctypedef declarations.

    Only names listed in STRUCTS, plus every DvzGui* struct, are emitted.
    """
    out = []
    for struct_name, (kind, fields) in structs.items():
        if struct_name not in STRUCTS and not struct_name.startswith('DvzGui'):
            continue
        out.append(f'ctypedef {kind} {struct_name}:\n')
        for is_const, dtype, field_name in fields:
            if dtype == 'bool':
                dtype = 'bint'  # Cython spelling of C bool
            if is_const:
                dtype = 'const ' + dtype
            out.append(f'    {dtype} {field_name}\n')
        out.append('\n')
    return ''.join(out)
def _parse_func(text, is_output=False):
    """Parse C function declarations from *text*.

    When is_output is False, *text* is a C header and only DVZ_EXPORT
    functions are matched; when True, *text* is the generated .pxd and
    only the section between the FUNCTION markers is scanned.

    Returns a dict mapping function name -> (return_type, args), where
    args is a tuple of (const, dtype, name) triples (name may be empty
    when the header omits the parameter name).
    """
    if is_output:
        text = text[text.index(FUNCTION_START):text.index(FUNCTION_END)]
    funcs = {}
    # syntax we don't want to see in the final parse tree
    LPAR, RPAR, LBRACE, RBRACE, COMMA, SEMICOLON = map(Suppress, "(){},;")
    const = Keyword("const")
    dtype = Word(alphanums + "_*")
    identifier = Word(alphanums + "_")
    # One argument: [const] TYPE [NAME] [,]
    argDecl = Group(
        Optional(const("const")) +
        dtype("dtype") +
        Optional(identifier("name")
                 ) + Optional(COMMA))
    args = Group(ZeroOrMore(argDecl))
    # Header functions are prefixed with DVZ_EXPORT; .pxd declarations are not.
    if not is_output:
        func = Suppress("DVZ_EXPORT")
    else:
        func = Empty()
    func = func + \
        dtype("out") + \
        identifier("name") + \
        LPAR + args("args") + RPAR + \
        Optional(SEMICOLON)
    for item, start, stop in func.scanString(text):
        args = []
        for i, entry in enumerate(item.args):
            args.append((entry.const, entry.dtype, entry.name))
        funcs[item.name] = (item.out, tuple(args))
    return funcs
@lru_cache(maxsize=64)
def _camel_to_snake(name):
name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower()
def _gen_cython_func(name, func):
out, args = func
args_s = []
for const, dtype, argname in args:
if not argname:
if 'int' in dtype:
argname = 'n%d' % len(args_s)
elif dtype == 'float':
argname = 'value'
elif 'vec' in dtype:
argname = 'vec'
elif dtype == 'void*':
argname = 'buf'
elif 'char' in dtype:
argname = 's'
elif dtype == 'void':
dtype = ''
argname = ''
elif dtype == 'bool':
argname = 'value'
elif dtype == 'size_t':
argname = 'size'
elif 'Dvz' in dtype:
argname = _camel_to_snake(
dtype.replace('Dvz', '')).replace('*', '')
else:
raise ValueError(dtype)
if const:
dtype = "const " + dtype
if dtype == 'bool':
dtype = 'bint'
args_s.append(f'{dtype} {argname}')
args = ', '.join(args_s)
return f'{out} {name}({args})'
if __name__ == '__main__':
    # Accumulators for the three generated sections of the .pxd file.
    enums_to_insert = ''
    structs_to_insert = ''
    funcs_to_insert = ''
    # Parse already-defined functions in the pxd: only these get regenerated.
    already_defined_funcs = _parse_func(
        read_file(CYTHON_OUTPUT), is_output=True)
    for filename in iter_header_files():
        if filename.name not in HEADER_FILES:
            continue
        text = read_file(filename)
        # Parse the enums
        enums = _parse_enum(text)
        # Generate the Cython enum definitions
        generated = _gen_enum(enums)
        if generated:
            enums_to_insert += f'# from file: {filename.name}\n\n{generated}'
        if 'color' in str(filename):
            # Colormap headers use C* #define constants as enum values;
            # substitute their numeric values into the generated text.
            defines = parse_defines(text)
            for key, val in defines.items():
                enums_to_insert = enums_to_insert.replace(key, str(val))
        # Parse the structs
        structs = _parse_struct(text)
        # Generate the Cython struct definitions
        generated = _gen_struct(structs)
        if generated:
            structs_to_insert += f'# from file: {filename.name}\n\n{generated}'
        if 'controls' in str(filename):
            # Same constant substitution for the controls header.
            defines = parse_defines(text)
            for key, val in defines.items():
                structs_to_insert = structs_to_insert.replace(key, str(val))
        # Parse the functions
        funcs = _parse_func(text)
        h = f'# from file: {filename.name}\n'
        funcs_to_insert += h
        generated = ''
        for name, func in funcs.items():
            # Only emit bindings already declared in the pxd; pop so that
            # any leftovers can be reported as missing below.
            existing = already_defined_funcs.pop(name, None)
            if not existing:
                continue
            generated = _gen_cython_func(name, func)
            funcs_to_insert += generated + '\n'
        if not generated:
            # No function from this header was emitted: drop its header line.
            funcs_to_insert = funcs_to_insert[:-len(h)]
        else:
            funcs_to_insert += '\n'
    if already_defined_funcs.keys():
        print(already_defined_funcs)
        raise RuntimeError(
            "Some Cython function bindings are missing, check gencython.py")
    # Insert into the Cython file
    insert_into_file(
        CYTHON_OUTPUT, ENUM_START, ENUM_END, enums_to_insert)
    insert_into_file(
        CYTHON_OUTPUT, STRUCT_START, STRUCT_END, structs_to_insert)
    # insert_into_file(
    #     CYTHON_OUTPUT, UNION_START, UNION_END, unions_to_insert)
    insert_into_file(
        CYTHON_OUTPUT, FUNCTION_START, FUNCTION_END, funcs_to_insert)
| 31.736364 | 99 | 0.564117 | from functools import lru_cache
from pathlib import Path
import os
from pprint import pprint
import re
import sys
from textwrap import indent
import pyparsing as pp
from pyparsing import (
Suppress, Word, alphas, alphanums, nums, Optional, Group, ZeroOrMore, empty, restOfLine,
Keyword, cStyleComment, Empty, Literal
)
HEADER_DIR = (Path(__file__).parent / '../../../include/datoviz').resolve()
INTERNAL_HEADER_DIR = (Path(__file__).parent / '../../../src').resolve()
EXTERNAL_HEADER_DIR = HEADER_DIR / '../../external'
CYTHON_OUTPUT = (Path(__file__).parent / '../datoviz/cydatoviz.pxd').resolve()
HEADER_FILES = (
'app.h', 'vklite.h', 'context.h', 'canvas.h', 'keycode.h', 'transforms.h', 'colormaps.h',
'array.h', 'mesh.h', 'controls.h', 'graphics.h', 'builtin_visuals.h', 'panel.h',
'visuals.h', 'scene.h', 'transfers.h')
STRUCTS = (
'DvzEvent',
'DvzEventUnion',
'DvzFrameEvent',
'DvzKeyEvent',
'DvzMouseButtonEvent',
'DvzMouseClickEvent',
'DvzMouseDragEvent',
'DvzMouseMoveEvent',
'DvzMouseWheelEvent',
'DvzRefillEvent',
'DvzGuiEvent',
'DvzResizeEvent',
'DvzScreencastEvent',
'DvzSubmitEvent',
'DvzTimerEvent',
'DvzViewport',
)
ENUM_START = '# ENUM START'
ENUM_END = '# ENUM END'
STRUCT_START = '# STRUCT START'
STRUCT_END = '# STRUCT END'
UNION_START = '# UNION START'
UNION_END = '# UNION END'
FUNCTION_START = '# FUNCTION START'
FUNCTION_END = '# FUNCTION END'
def iter_header_files():
for h in sorted(HEADER_DIR.glob('*.h')):
yield h
for h in sorted(INTERNAL_HEADER_DIR.glob('*.h')):
yield h
def read_file(filename):
text = filename.read_text()
return _remove_comments(text)
def insert_into_file(filename, start, end, insert):
text = filename.read_text()
i0 = text.index(start)
i1 = text.index(end)
out = text[:i0 + len(start) + 1]
out += indent(insert, ' ')
out += text[i1 - 5:]
filename.write_text(out)
def _remove_comments(text):
return '\n'.join([l.split('//')[0] for l in text.splitlines()])
def parse_defines(text):
defines = re.findall(
r"#define (C[A-Z\_0-9]+)\s+([^\n]+)", text, re.MULTILINE)
defines = dict(defines)
defines = {k: v.replace('(', '').replace(')', '')
for k, v in defines.items()}
for k, v in defines.items():
if v.isdigit():
defines[k] = int(v)
for k, v in defines.items():
if isinstance(v, str) and '+' not in v:
defines[k] = defines[v]
for k, v in defines.items():
if isinstance(v, str) and '+' in v:
defines[k] = defines[v.split(' + ')[0]] + \
defines[v.split(' + ')[1]]
return defines
def _parse_enum(text):
enums = {}
LBRACE, RBRACE, EQ, COMMA, SEMICOLON = map(Suppress, "{}=,;")
_enum = Suppress("typedef enum")
identifier = Word(alphanums + "_+-")
enumValue = Group(
identifier("name") +
Optional(EQ + identifier("value")) +
Optional(COMMA) +
Optional(Suppress(cStyleComment)))
enumList = Group(enumValue + ZeroOrMore(enumValue))
enum = _enum + LBRACE + \
enumList("names") + RBRACE + identifier("enum") + SEMICOLON
for item, start, stop in enum.scanString(text):
l = []
for i, entry in enumerate(item.names):
if entry.value.isdigit():
entry.value = int(entry.value)
elif not entry.value:
entry.value = i
elif entry.value in ('false', 'true'):
entry.value = entry.value.capitalize()
l.append((entry.name, entry.value))
enums[item.enum] = l
return enums
def _gen_enum(enums):
out = ''
for name, l in enums.items():
out += f'ctypedef enum {name}:\n'
for identifier, value in l:
out += f' {identifier} = {value}\n'
out += '\n'
return out
def _parse_struct(text):
structs = {}
# syntax we don't want to see in the final parse tree
LBRACE, RBRACE, COMMA, SEMICOLON = map(Suppress, "{},;")
_struct = Literal("struct") ^ Literal("union")
const = Keyword("const")
dtype = Word(alphanums + "_*")
identifier = Word(alphanums + "_[]")
structDecl = Group(Optional(const("const")) +
dtype("dtype") + identifier("name") + SEMICOLON)
structList = Group(structDecl + ZeroOrMore(structDecl))
struct = _struct('struct') + identifier("struct_name") + LBRACE + \
structList("names") + RBRACE + SEMICOLON
for item, start, stop in struct.scanString(text):
l = []
for i, entry in enumerate(item.names):
l.append((entry.const, entry.dtype, entry.name))
structs[item.struct_name] = (item.struct, l)
return structs
def _gen_struct(structs):
out = ''
for name, (struct, l) in structs.items():
if name in STRUCTS or name.startswith('DvzGui'):
out += f'ctypedef {struct} {name}:\n'
for const, dtype, identifier in l:
if dtype == 'bool':
dtype = 'bint'
if const:
dtype = "const " + dtype
out += f' {dtype} {identifier}\n'
out += '\n'
return out
def _parse_func(text, is_output=False):
if is_output:
text = text[text.index(FUNCTION_START):text.index(FUNCTION_END)]
funcs = {}
LPAR, RPAR, LBRACE, RBRACE, COMMA, SEMICOLON = map(Suppress, "(){},;")
const = Keyword("const")
dtype = Word(alphanums + "_*")
identifier = Word(alphanums + "_")
argDecl = Group(
Optional(const("const")) +
dtype("dtype") +
Optional(identifier("name")
) + Optional(COMMA))
args = Group(ZeroOrMore(argDecl))
if not is_output:
func = Suppress("DVZ_EXPORT")
else:
func = Empty()
func = func + \
dtype("out") + \
identifier("name") + \
LPAR + args("args") + RPAR + \
Optional(SEMICOLON)
for item, start, stop in func.scanString(text):
args = []
for i, entry in enumerate(item.args):
args.append((entry.const, entry.dtype, entry.name))
funcs[item.name] = (item.out, tuple(args))
return funcs
@lru_cache(maxsize=64)
def _camel_to_snake(name):
name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower()
def _gen_cython_func(name, func):
out, args = func
args_s = []
for const, dtype, argname in args:
if not argname:
if 'int' in dtype:
argname = 'n%d' % len(args_s)
elif dtype == 'float':
argname = 'value'
elif 'vec' in dtype:
argname = 'vec'
elif dtype == 'void*':
argname = 'buf'
elif 'char' in dtype:
argname = 's'
elif dtype == 'void':
dtype = ''
argname = ''
elif dtype == 'bool':
argname = 'value'
elif dtype == 'size_t':
argname = 'size'
elif 'Dvz' in dtype:
argname = _camel_to_snake(
dtype.replace('Dvz', '')).replace('*', '')
else:
raise ValueError(dtype)
if const:
dtype = "const " + dtype
if dtype == 'bool':
dtype = 'bint'
args_s.append(f'{dtype} {argname}')
args = ', '.join(args_s)
return f'{out} {name}({args})'
if __name__ == '__main__':
enums_to_insert = ''
structs_to_insert = ''
funcs_to_insert = ''
# Parse already-defined functions in the pxd
already_defined_funcs = _parse_func(
read_file(CYTHON_OUTPUT), is_output=True)
for filename in iter_header_files():
if filename.name not in HEADER_FILES:
continue
text = read_file(filename)
# Parse the enums
enums = _parse_enum(text)
# Generate the Cython enum definitions
generated = _gen_enum(enums)
if generated:
enums_to_insert += f'
if 'color' in str(filename):
defines = parse_defines(text)
for key, val in defines.items():
enums_to_insert = enums_to_insert.replace(key, str(val))
# Parse the structs
structs = _parse_struct(text)
# Generate the Cython struct definitions
generated = _gen_struct(structs)
if generated:
structs_to_insert += f'
if 'controls' in str(filename):
defines = parse_defines(text)
for key, val in defines.items():
structs_to_insert = structs_to_insert.replace(key, str(val))
# Parse the functions
funcs = _parse_func(text)
h = f'
funcs_to_insert += h
generated = ''
for name, func in funcs.items():
existing = already_defined_funcs.pop(name, None)
if not existing:
continue
generated = _gen_cython_func(name, func)
funcs_to_insert += generated + '\n'
if not generated:
funcs_to_insert = funcs_to_insert[:-len(h)]
else:
funcs_to_insert += '\n'
if already_defined_funcs.keys():
print(already_defined_funcs)
raise RuntimeError(
"Some Cython function bindings are missing, check gencython.py")
# Insert into the Cython file
insert_into_file(
CYTHON_OUTPUT, ENUM_START, ENUM_END, enums_to_insert)
insert_into_file(
CYTHON_OUTPUT, STRUCT_START, STRUCT_END, structs_to_insert)
# insert_into_file(
# CYTHON_OUTPUT, UNION_START, UNION_END, unions_to_insert)
insert_into_file(
CYTHON_OUTPUT, FUNCTION_START, FUNCTION_END, funcs_to_insert)
| true | true |
f71ebc6dd58650afeb83cd60ddcb615ad4faec6b | 27,401 | py | Python | disabled-challenges/adventure_game/poller/for-release/machine.py | pingjuiliao/cb-multios | 64ededd0b87030eda7c40c4388a4ad8283712d8e | [
"MIT"
] | 473 | 2016-08-01T12:48:16.000Z | 2022-03-09T18:13:14.000Z | disabled-challenges/adventure_game/poller/for-release/machine.py | pingjuiliao/cb-multios | 64ededd0b87030eda7c40c4388a4ad8283712d8e | [
"MIT"
] | 71 | 2016-08-01T03:33:44.000Z | 2022-03-09T18:37:04.000Z | disabled-challenges/adventure_game/poller/for-release/machine.py | pingjuiliao/cb-multios | 64ededd0b87030eda7c40c4388a4ad8283712d8e | [
"MIT"
] | 121 | 2016-08-01T04:07:53.000Z | 2022-03-07T11:08:09.000Z | # Plan for testing
# - Decide to use weak team or strong team (i.e. always lose battles or always win battles)
# - Random walk with bias towards tiles that have not been visited, or have been visited least recently
# - Quit after N number of steps, where N is a random number between 100 and 500
import array
import random
import struct
from cStringIO import StringIO
from generator.actions import Actions, Variable
# Monster species table: base stats and move list per elemental type.
# The index into this list is a monster's kind_id (see Monster/GenMonster).
KINDS = [
    { 'type': 'earth', 'name': 'Pheapxalm', 'max_health': 100, 'attack': 90, 'defense': 100,
      'attacks': [('Scratch', 20), ('Cutting Leaf', 30)] },
    { 'type': 'wind', 'name': 'Eashtel', 'max_health': 110, 'attack': 90, 'defense': 90,
      'attacks': [('Scratch', 20), ('Tornado', 40)] },
    { 'type': 'fire', 'name': 'Tacalm', 'max_health': 120, 'attack': 100, 'defense': 60,
      'attacks': [('Scratch', 20), ('Bonfire', 50)] },
]
STATES = ['-KEABDCDCAADFDCPPABAAKBABPPADPOABPPALPOJEABPPBCPOIJABPPAGPOACPPAFPOABABANPOIJABPPAGPOABPPAGPOABPPAGPOAGPPABPOIKABPPACPOABPPABAGALPOAEPPAEPOABPPABACABPOIDABPPACPOAIPPAEPOABPPAGPOAGPPAGPOICABPPAGPOAGPPAGPOAKPPACPOABPPADPOICABPPABPOABCBACPOAHPPAIPOAIPPAIPOIBABPPAGPOAFPPAHPOACPPAEPOADPPABPOABADAEPOIDABPPABPOAEPPACPOAEPPACPOACPPADPOACPPAMPOIEABPPAGPOABCAABPOAHPPABAHBAPOIGABPPAGPOADPPADPOACPPBAPOAEPPAEPODKPPAIPOAFPPABCFACPOACPPAGPOABAEABAFAEPOACPPACPOAEPPACPOABPPAFPOACPPADPOCPPPAKPOAFPPABPOABPPACPOAGPPAHPOABPPAIPOAEPPADPOACPPADPOCPPPACPOACPPAFPOAGPPACPOABPPACPOAKPPABPOADPPBCPOABAJADPOCLPPADPOABPPAFPOAFPPADPOABPPADPOANPPAHPOABCHAMPOABAKADPOCIPPADPOABPPABPOABPPABPOABBPACPOAGPPACPOABPPAEPOAMPPBJPOABALCHPPACPOABPPAGPOAHPPACPOABPPACPOAHPPABPOABPPABPOABCKAMPOABCGAFPOABAIABPOABPPAIPOAKPPACPOABAMBFPPACPOABPPAEPOACPPAFPOAFPPADPOABPPBNPOABCJABCIABPOAEPPADPOAOPPAEPOBCPPAJPOADPPADPOABPPABPOAEPPABPOADPPBLPOABPPACPOAFPPAFPOAKPPAHPOBDPPAFPOACPPADPOACPPAEPOAEPPCAPOAFPPAHPOAJPPACPOABPPADPOABPPABPOBBPPAHPOABPPAEPOADPPAEPOAEPPAGPOABPPBHPOAGPPACPOABPPADPOAFPPACPOADPPABPOACPPACPOACPPABPOBAPPAIPOABPPALPOADPPAEPOADPPADPOABPPAFPOADPPAJPOAMPPABPOAGPPADPOADPPACPOACPPACPOABPPABPOBAPPAIPOACPPABPOABCCACPOABCDAFPOABPPAEPOAEPPAEPOABPPAFPOACPPAEPOACPPAEPOAKPPAMPOACPPAJPOBDPPAPPOABPPADPOABCEAFPPALPOABPPAFPOACPPACPOABCLAMPPBGPOBBPPAIPOADPPAGPOALPPAPPOADPPACPOAPPPBEPOBDPPADPOAJPPAEPOAHPPBBPOBHPPACPOABBDAJPOAFPPABPOBLPPAGPOAGPPANPOACPPAFPOBFPPAOPOAEPPACPOBJPPAHPOABPPACPOADPPAOPOAGPPACPOBDPPBFPOBJPPACPOACPPAKPOABCMAMPOAEPPABPOACPPABPOBDPPACPOABBCABPOABBEBAPOBIPPBNPOABPPAGPOBCPPBFPOABBBBDPPACPOACPPAJPOACPPAEPOACPPANPOACPPAFPOBCPPBGPOBCPPAEPOABCPACPOABPPAKPOABPPABPOABPPAFPOABPPADPOADPPABPOADPPAFPOBBPPBHPOBCPPABPOACPPACPOAFPPAGPOADPPAGPOABPPAFPOAGPPABPOBFPPANPOABBAAJPOBCPPAFPOAFPPAGPOACPPAFPOAEPPAFPOAEPPACPOABPPADPOAPPPADPOABPPACPOACPPAFPOABPPAIPOACPPABPOBBPPAGPOAGPPACPOABBOACPOAEPPABPOABCOAIPPADPOACPPAIPOAPPPABPOAHPPACPOACPPAEPOADPPACPOACPPABPOBBPPAEPOAKPPACPOAFPPBLPOALPPABPOAGPPACPOANPPACPOBBPPABPO
AMPPAEPOAEPPBKPOABCNALPPABPOAGPPABPOAGPPAGPOACPPABPOBAPPACPOABDAALPPAFPOAEPPAJPOABBNABPOABBLAOPOAKPPADPOAGPPABPOAGPPAJPOAPPPADPOAHPPAJPOAEPPBDPOADPPABPOAOPPAFPOACPPADPOAHPPACPOACPPACPOABAOACPOBAPPAEPOABDBBLPOACPPACPOACPPABPOBDPPAFPOADPPAFPOABPPAEPOADPPAFPOBAPPACPOABPPAIPOABPPANPOAEPPADPOAGPPABPOANPPADPOABPPAMPOACPPABPOACPPACPOADPPAGPOBDPPAIPOABPPABBMALPOAHPPABPOAGPPADPOALPPBBPOABPPAOPOBDPPACPOABPPADPOABPPAJPOABPPAJPOABPPADPOADPPAFPOADPPABPOAHPPACPOAEPPANPOABAPALPOBJPPALPOABPPAPPOABPPAEPOACPPABPOABBIACPOACPPAIPOACPPADPOABBFAEPOABPPACPOAHPPAHPOBKPPBLPOABBKADPOAEPPAMPOACPPABPOABPPAMPOAEPPAFPOABPPABPOBMPPBEPOABPPADPOABDCAHPOABBJAKPOADPPAGPOABBGAEPOABPPABPOAFPPAJPOBKPPADPOABPPAFPOABPPAGPOAGPPABDDBAPOABBHACPOAJPPAGPOAIPPACPOABANACPPADPOABDEKFABPPAAACJEABAALEAHAFELGBHEGJGFAEAJFAGIGFGBHAHIGBGMGNAAABAAKMABKMABJLABKFABAJFAGIGFGBHAHIGBGMGNAAACAALEABLEABJMABKGABAHEFGBHDGIHEGFGMABACAALOABLOABJMABJMABAGFEGBGDGBGMGNACABAAIAACIAACKFABDNABCEKGADACJEABAAJABPAGEEHFHDHEGJGOADAHEFGBHDGIHEGFGMABBBAALGADLGADKLABKLABAHEFGBHDGIHEGFGMABBBAALGADLGADKLABKLABAHEFGBHDGIHEGFGMABBBAALGADLGADKLABKLABABCELGAEACJEABAAIABJAFECHCGFGOHEABAJFAGIGFGBHAHIGBGMGNAABBAAKMADKMADKLABLFABABCEIGABABCEIGABABCEICACABCEBOACJEABAALIEHAFECHCGFGOHEACAGFEGBGDGBGMGNACCMAAJIAHJIAHJAACKIABAHEFGBHDGIHEGFGMABCNAAJGAHJGAHIHACIHACACJEABAAKMEPAFEGGJGPGOGBACAJFAGIGFGBHAHIGBGMGNAADDAALMAHLMAHINACJHACAGFEGBGDGBGMGNACDAAALIAHLIAHJEACKMABACJEABAAIIGHAEFEGPGOHJAEAJFAGIGFGBHAHIGBGMGNAADMAAIEAJIEAJJGACKAACAJFAGIGFGBHAHIGBGMGNAADOAAJEAJJEAJJIACKCACAJFAGIGFGBHAHIGBGMGNAADMAAIEAJIEAJJGACKAACAHEFGBHDGIHEGFGMABDOAAJOAJJOAJJIACJIACACJEABAAKIIMABAFELGBHEGJGFAEAHEFGBHDGIHEGFGMABJHABAAKGAMKGAMLBACLBACAJFAGIGFGBHAHIGBGMGNAAJGABAAJEAMJEAMLAACLKACAGFEGBGDGBGMGNACJIABAALIAMLIAMLMACJEACAGFEGBGDGBGMGNACJIABAALIAMLIAMLMACJEACABCEIGAGABCEJKABACJEABAAKIHDAFEMGFHCGPHJACAGFEGBGDGBGMGNACIHABAALAAKLAAKKLACIDACAGFEGBGDGBGMGNACIJABAAIAALIAALKNACIFACABCEJOAFACJEABAALIGAAFEDGBHCGMHJADAHEFGBHDGIHEGFGMABDLAAIGAJIGAJJFACJFACAGFEGBGDGBGMGNACDKAAIIAJIIAJJOACLGABAJFA
GIGFGBHAHIGBGMGNAADKAALEAILEAIJEACJOACACJEABAALAFNAFECHCGFGOHEACAJFAGIGFGBHAHIGBGMGNAADKAALEAILEAIJEACJOACAGFEGBGDGBGMGNACDKAAIIAJIIAJJOACLGABABCELOACACJEABAALMGOAFELGBHEGJGFACAHEFGBHDGIHEGFGMABIHABAAKGAKKGAKKBACKBACAJFAGIGFGBHAHIGBGMGNAAIEABAAIEAKIEAKJOACKIACABCEBOABCELCAFABCELKADABCELGAEABCEJKABACJEABAAKIEBAFEDGBHCGMHJAEAHEFGBHDGIHEGFGMABCGAAJOAGJOAGIAACIAACAHEFGBHDGIHEGFGMABCHAAKGAGKGAGIBACIBACAJFAGIGFGBHAHIGBGMGNAACEAAIEAGIEAGLOABIIACAGFEGBGDGBGMGNACCFAAKAAGKAAGIJACKBABABCEIGABACJEABAAIMEDAFELGBHEGJGFACAHEFGBHDGIHEGFGMABCLAAIGAHIGAHIFACIFACAGFEGBGDGBGMGNACCKAAIIAHIIAHIOACKGABACJEABAALMFFAGEEHFHDHEGJGOABAGFEGBGDGBGMGNACDIAALIAILIAIJMACLEABACJEABAAKMDGAEFEGPGOHJADAHEFGBHDGIHEGFGMABCBAALGAFLGAFLLABLLABAJFAGIGFGBHAHIGBGMGNAACDAALMAFLMAFLNABIHACAGFEGBGDGBGMGNACCDAAJAAGJAAGIHACJPABACJEABAAKACFAFELGBHEGJGFAEAHEFGBHDGIHEGFGMABBFAAJGAEJGAEKPABKPABAGFEGBGDGBGMGNACBGAAKIAEKIAELKABJCABAJFAGIGFGBHAHIGBGMGNAABEAAIEAEIEAEKOABLIABAJFAGIGFGBHAHIGBGMGNAABGAAJEAEJEAELAABLKABABCEJGACACJEABAAIAELAFECHCGFGOHEACAGFEGBGDGBGMGNACDAAALIAHLIAHJEACKMABAHEFGBHDGIHEGFGMABCOAAJOAHJOAHIIACIIACABCEKGADACJEABAAKADOAFELGBHEGJGFABAGFEGBGDGBGMGNACCEAAJIAGJIAGIIACKAABACJEABAALMCDAGEEHFHDHEGJGOABAHEFGBHDGIHEGFGMABBDAAIGAEIGAEKNABKNABABCELKADACJEABAAKMDGAFEMGFHCGPHJABAGFEGBGDGBGMGNACCBAAIAAGIAAGIFACJNABABCEICACABCEJOAFACJEABAALMCDAEFEGPGOHJACAGFEGBGDGBGMGNACBHAALAAELAAELLABJDABAHEFGBHDGIHEGFGMABBFAAJGAEJGAEKPABKPABABCELKADABCELOACACJEABAAIIEOAGFDHJGMHGGJGBABAGFEGBGDGBGMGNACDDAAJAAIJAAIJHACKPABACJEABAAIIEOAFECHCGFGOHEABAJFAGIGFGBHAHIGBGMGNAADAAAKEAHKEAHIKACJEACABCEIGABABCEIKAFACJEABAAIEHCAGEEHFHDHEGJGOAEAJFAGIGFGBHAHIGBGMGNAAIFABAAIMAKIMAKJPACKJACAJFAGIGFGBHAHIGBGMGNAAIGABAAJEAKJEAKKAACKKACAHEFGBHDGIHEGFGMABIDABAAIGAKIGAKJNACJNACAHEFGBHDGIHEGFGMABIGABAAJOAKJOAKKAACKAACACJEABAAIAELAGEEHFHDHEGJGOACAHEFGBHDGIHEGFGMABCNAAJGAHJGAHIHACIHACAJFAGIGFGBHAHIGBGMGNAACPAAJMAHJMAHIJACJDACABCEJGACADDCAACKAAABAHEFGBHDGIHEGFGMABADAAIGACIGACJNABJNABAAMNCC--']
def dearmor(s):
    """Decode an ASCII-armored save state into raw bytes.

    Format: one leading marker byte and a trailing '--' (stripped),
    then pairs of letters where each letter's offset from 'A' is one
    nibble (high nibble first).  The final two decoded bytes are a
    checksum and are discarded.
    """
    body = s[1:-2]
    decoded = []
    for i in range(0, len(body), 2):
        hi = (ord(body[i]) - ord('A')) & 0xf
        lo = (ord(body[i + 1]) - ord('A')) & 0xf
        decoded.append(chr((hi << 4) | lo))
    # Drop the 2-byte trailing checksum.
    return ''.join(decoded)[:-2]
class Deserializer(object):
    """Little-endian binary reader over a file-like object.

    A plain string may be passed instead of a stream; it is wrapped
    automatically.
    """
    def __init__(self, f):
        # Convenience: accept a raw buffer as well as a stream.
        if isinstance(f, str):
            f = StringIO(f)
        self.f = f
    def u8(self):
        # Unsigned 8-bit integer.
        (value,) = struct.unpack('<B', self.f.read(1))
        return value
    def u16(self):
        # Unsigned 16-bit little-endian integer.
        (value,) = struct.unpack('<H', self.f.read(2))
        return value
    def u32(self):
        # Unsigned 32-bit little-endian integer.
        (value,) = struct.unpack('<I', self.f.read(4))
        return value
    def number(self):
        """Decode a variable-length signed integer.

        First byte: bit 6 is the sign, bits 0-5 are the low payload.
        While bit 7 is set, further bytes contribute 7 payload bits.
        NOTE(review): the shift is never advanced past 6 for later
        continuation bytes -- this mirrors the serializer in the
        challenge binary, so it must not be "fixed" here.
        """
        byte = self.u8()
        negative = byte & 0x40
        value = byte & 0x3F
        shift = 6
        while byte & 0x80:
            byte = self.u8()
            value |= (byte & 0x7f) << shift
        return -value if negative else value
    def string(self):
        """Read a length-prefixed string: varint count, then raw bytes."""
        return self.f.read(self.number())
class Monster(object):
    """A monster decoded from a serialized save state."""
    def __init__(self, d):
        # Field order is fixed by the wire format: name, kind id,
        # then the six numeric stats.
        self.name = d.string()
        self.kind_id = d.number()
        self.kind = KINDS[self.kind_id]
        for field in ('level', 'xp', 'health', 'max_health',
                      'attack', 'defense'):
            setattr(self, field, d.number())
class GenMonster(object):
    """A freshly generated wild monster of the given kind and level."""
    def __init__(self, kind_id, level):
        kind = KINDS[kind_id]
        self.level = level
        self.kind_id = kind_id
        self.kind = kind
        self.name = kind['name']
        # Stats grow linearly with level; a new monster starts with
        # no experience and full health.
        self.max_health = kind['max_health'] + 8 * level
        self.attack = kind['attack'] + level
        self.defense = kind['defense'] + level
        self.xp = 0
        self.health = self.max_health
class Vet(object):
    """A Vet Center tile.

    `d` is accepted for decoder uniformity with the other map objects
    but carries no payload.
    """
    def __init__(self, d):
        self.icon = 'V'
class Trainer(object):
    """An enemy trainer on the map."""
    def __init__(self, d):
        # The icon doubles as state: it is blanked once beaten.
        self.icon = chr(d.number())
        self.player_won = bool(d.number())
        self.reward = d.number()
        self.name = d.string()
        # Counted list of serialized team monsters.
        self.team = [Monster(d) for _ in xrange(d.number())]
class Treasure(object):
    """A treasure tile; a '$' icon marks loot not yet collected."""
    def __init__(self, d):
        self.icon = chr(d.number())
        # Only an uncollected treasure still pays out.
        self.avail = (self.icon == '$')
        self.value = d.number()
class NameChanger(object):
    """A name-changer tile.

    `d` is accepted for decoder uniformity but carries no payload.
    """
    def __init__(self, d):
        self.icon = 'N'
class Empty(object):
    """A featureless tile (rendered as a blank)."""
    def __init__(self):
        self.icon = ' '
class Wall(object):
    """An impassable wall tile."""
    def __init__(self):
        self.icon = '#'
class Player(object):
    """The player: position, icon, money, active team, and bank."""
    def __init__(self, d):
        self.x = d.number()
        self.y = d.number()
        self.icon = chr(d.u8())
        self.money = d.number()
        # Active party first, then the bank; each is a counted list
        # of serialized monsters.
        self.team = [Monster(d) for _ in xrange(d.number())]
        self.bank = [Monster(d) for _ in xrange(d.number())]
class Map(object):
    """The world map: an RLE-encoded tile grid, map objects, and the player.

    Fields are decoded in the exact order the save format stores them;
    the sequence of d.number()/d.u8() calls is the wire protocol, so
    statement order here must not change.
    """
    def __init__(self, d):
        # Header: dimensions, respawn point, and the number of objects.
        self.width = d.number()
        self.height = d.number()
        self.start_x = d.number()
        self.start_y = d.number()
        self.next_id = d.number()
        # Tile grid, run-length encoded as (count, tile-id) pairs.
        self.tiles = array.array('B')
        i = 0
        while i < self.width * self.height:
            count = d.number()
            if count < 0:
                raise Exception('Bad RLE count')
            tile = d.u8()
            for j in xrange(count):
                self.tiles.append(tile)
            i += count
        # The runs must cover the grid exactly; overshoot is corruption.
        if i != self.width * self.height:
            raise Exception('Bad RLE')
        # One serialized object per id, plus two fixed pseudo-objects:
        # tile id 0xFE is empty ground, 0xFF is a wall.
        self.objects = {}
        for i in xrange(self.next_id):
            self.objects[i] = self.decode_mapobject(d)
        self.objects[0xFE] = Empty()
        self.objects[0xFF] = Wall()
        # The player record trails the map data.
        self.player = Player(d)
    def decode_mapobject(self, d):
        """Decode one map object: a numeric type tag, then its payload."""
        type_ = d.number()
        if type_ == 0:
            return Vet(d)
        elif type_ == 1:
            return Treasure(d)
        elif type_ == 2:
            return Trainer(d)
        elif type_ == 3:
            return NameChanger(d)
        else:
            raise Exception('Bad map object type (%d)' % type_)
    def get_tile(self, x, y):
        """Return the map object occupying grid cell (x, y)."""
        return self.objects[self.tiles[x + y * self.width]]
    def display(self):
        """Render the screen exactly as the challenge binary prints it:
        ANSI clear-screen, the money line, then the icon grid with '*'
        marking the player."""
        out = StringIO()
        out.write('\x1B[2J')
        out.write('Money: %6d\n' % self.player.money)
        for y in xrange(self.height):
            for x in xrange(self.width):
                if (x, y) == (self.player.x, self.player.y):
                    out.write('*')
                else:
                    out.write(self.get_tile(x, y).icon)
            out.write('\n')
        return out.getvalue()
class Random(object):
    """Deterministic PRNG: two Galois LFSRs xored into a 16-bit stream.

    This mirrors the RNG inside the challenge binary, so draws from it
    must stay in lockstep with the binary's own draws.
    """
    def __init__(self, state0, state1):
        # Seed both LFSR registers from the caller-supplied state.
        # (Previously the parameters were ignored and the constants
        # 168642907 / 182064376 were hard-coded; the only caller passes
        # exactly those values, so honoring the parameters is
        # behavior-compatible and removes the dead parameters.)
        self.state0 = state0
        self.state1 = state1
    def _update(self):
        # Advance each Galois LFSR one step: conditionally xor the tap
        # mask, then shift right.
        if self.state0 & 1:
            self.state0 ^= 0x29D1E9EB << 1
        self.state0 >>= 1
        if self.state1 & 1:
            self.state1 ^= 0xF5CDE95 << 1
        self.state1 >>= 1
    def _rand(self):
        """Return the next raw 16-bit output."""
        self._update()
        return (self.state0 ^ self.state1) & 0xFFFF
    def randint(self, upper):
        """Return a uniform integer in [0, upper) via rejection sampling."""
        # Largest multiple of `upper` representable in 16 bits, minus
        # one; raw values above it are rejected to avoid modulo bias.
        # Floor division is written explicitly so the arithmetic is the
        # same under Python 3 as under Python 2's integer '/'.
        limit = 0x10000 // upper * upper - 1
        r = self._rand()
        while r > limit:
            r = self._rand()
        return r % upper
class Game(object):
    """Replays one game session against the challenge binary.

    Every recv/send string and every draw from the mirrored RNG must
    match the binary byte for byte, so statement order throughout this
    class is part of the protocol -- do not reorder.
    """
    def __init__(self, state_string, recv, send):
        self.MAX_STEPS = 600 #int(random.paretovariate(2) * 30)#random.randint(50, 200)
        self.recv = recv
        self.send = send
        # Decode the armored save state into the full world model.
        self.map = Map(Deserializer(dearmor(state_string)))
        # Must be seeded identically to the RNG inside the binary.
        self.rand = Random(168642907, 182064376)
    def current(self):
        """Return the player's current (x, y) position."""
        return (self.map.player.x, self.map.player.y)
    def calc_weight(self, p):
        """Weight a candidate step; tiles visited long ago (or never)
        score higher so the walk keeps exploring."""
        x, y, d = p
        if (x, y) not in self.visited:
            # we have never visited this tile, give max weight
            return 5.0
        dt = min(self.time - self.visited[(x, y)], 40)
        return dt / 10.0
    def next_step(self):
        """Pick the next move: a weighted random choice over the legal
        neighbouring tiles.  Returns (x, y, direction-key)."""
        # all possible results
        OPTIONS = [(1, 0, 'd'), (-1, 0, 'a'), (0, 1, 's'), (0, -1, 'w')]
        self.visited[self.current()] = self.time
        self.time += 1
        # calculate real options
        options = map(lambda p: (p[0] + self.map.player.x, p[1] + self.map.player.y, p[2]), OPTIONS)
        # filter to get valid options
        options = filter(lambda p: 0 <= p[0] < self.map.width, options)
        options = filter(lambda p: 0 <= p[1] < self.map.height, options)
        options = filter(lambda p: not isinstance(self.map.get_tile(p[0], p[1]), Wall), options)
        # add weights
        options = map(lambda p: (p, self.calc_weight(p)), options)
        # from stack overflow... a weighted random choice
        # NOTE(review): `options` is iterated twice below; this relies
        # on Python 2 map/filter returning lists, not iterators.
        total = sum(weight for p, weight in options)
        r = random.uniform(0, total)
        upto = 0
        for p, weight in options:
            if upto + weight > r:
                return p
            upto += weight
    def handle_vet(self):
        """Visit the Vet Center: read the menu once and leave."""
        self.recv("Welcome to Vet Center! How can we help you?\n")
        while True:
            self.recv("Menu: (G)ood-bye, (H)eal, (S)tore a Monster, (W)ithdraw a Monster\n")
            self.send('g')
            break
    def handle_treasure(self, t):
        """Collect a treasure if it has not been taken yet."""
        if t.avail:
            self.map.player.money += t.value
            t.avail = False
            t.icon = ' '
    def handle_trainer(self, t):
        """Battle a trainer (at most once); collect the reward on a win."""
        if t.player_won:
            return
        self.recv("New challenger! Trainer %s prepares to battle!\n" % t.name)
        t.player_won = self.battle(t.team)
        if t.player_won:
            self.map.player.money += t.reward
            t.icon = ' '
    def handle_wild(self):
        """Fight a randomly generated wild monster scaled to the area."""
        level = self.get_level(*self.current())
        m = self.random_monster(level)
        self.recv("A wild %s attacks!\n" % m.name)
        self.battle([m], wild=True)
    def handle(self, tile):
        """Process stepping onto `tile`; return True if the player
        actually moves onto it."""
        if isinstance(tile, Empty):
            return True
        elif isinstance(tile, Wall):
            return False
        elif isinstance(tile, Vet):
            self.handle_vet()
            return False
        elif isinstance(tile, Treasure):
            self.handle_treasure(tile)
            return True
        elif isinstance(tile, Trainer):
            self.handle_trainer(tile)
            return tile.player_won
        else:
            raise Exception('Bad tile type')
    def handle_death(self):
        """If the whole team has fainted: halve the money, respawn at
        the start point, and heal every team member."""
        alive = list(filter(lambda m: m.health > 0, self.map.player.team))
        if len(alive):
            return
        self.map.player.money /= 2
        self.map.player.x, self.map.player.y = self.map.start_x, self.map.start_y
        for m in self.map.player.team:
            m.health = m.max_health
    def random_walk(self):
        """Main loop: wander MAX_STEPS tiles, handling whatever we hit
        and expecting a full screen redraw after every step."""
        self.time = 1
        self.visited = {}
        steps = 0
        self.recv(self.map.display())
        while steps < self.MAX_STEPS:
            steps += 1
            x, y, direction = self.next_step()
            self.send(direction)
            new_tile = self.map.get_tile(x, y)
            if self.handle(new_tile):
                self.map.player.x = x
                self.map.player.y = y
            # Mirrors the binary's 1-in-10 random encounter roll.
            if self.rand.randint(10) == 0:
                # wild battle!
                self.handle_wild()
            self.handle_death()
            self.recv(self.map.display())
    def save_game(self, state):
        """Drive the save menu and expect the armored `state` dump."""
        self.recv(self.map.display())
        self.send('1')
        self.recv('%s\nPress (C) to continue...\n' % state)
        self.send('c')
    def calc_dmg(self, attacker, defender, power):
        """Apply one attack to `defender`, mirroring the binary's
        damage formula (including its RNG draw)."""
        def multiplier(a, d):
            # Elemental rock-paper-scissors, expressed in percent.
            COMBOS = {
                ('earth', 'wind'): 200,
                ('wind', 'fire'): 200,
                ('fire', 'earth'): 200,
                ('wind', 'earth'): 50,
                ('fire', 'wind'): 50,
                ('earth', 'fire'): 50,
                ('earth', 'earth'): 100,
                ('wind', 'wind'): 100,
                ('fire', 'fire'): 100
            }
            return COMBOS[(a, d)]
        dmg = attacker.attack - defender.defense + power * multiplier(attacker.kind['type'], defender.kind['type']) / 100
        # Random 0-109% swing; non-positive damage has no effect.
        dmg = dmg * self.rand.randint(110) / 100
        if dmg > 0:
            defender.health = max(0, defender.health - dmg)
    def capture(self, m):
        """Store a captured monster: team first, then bank, else it is
        abandoned (with the matching message from the binary)."""
        if len(self.map.player.team) < 6:
            self.map.player.team.append(m)
        elif len(self.map.player.bank) < 99:
            self.map.player.bank.append(m)
        else:
            self.recv("No space on team or in bank. %s was abandoned.\n" % m.name)
    def gain_xp(self, pm, em):
        """Award experience to `pm` for defeating `em`, levelling up
        (with a shared random stat bonus and a full heal) when due."""
        xp = em.level * 100
        pm.xp += xp
        to_level_up = 100 * pm.level * pm.level / 4
        if pm.xp > to_level_up:
            pm.xp -= to_level_up
            if pm.level < 99:
                pm.level += 1
                bonus = self.rand.randint(299)
                pm.max_health += (800 + bonus) / 100
                pm.attack += (100 + bonus) / 100
                pm.defense += (100 + bonus) / 100
                pm.health = pm.max_health
    def battle(self, enemy, wild=False):
        """Fight the `enemy` list with random menu choices; return True
        if the player wins.  `wild` enables capture attempts.  Every
        recv/send and every RNG draw must track the binary exactly."""
        player = list(filter(lambda m: m.health > 0, self.map.player.team))
        enemy = list(enemy)
        for m in enemy:
            m.health = m.max_health
        while len(player) and len(enemy):
            pm, em = player[0], enemy[0]
            self.recv("%s (Lvl %d) versus %s (Lvl %d)\n" % (pm.name, pm.level, em.name, em.level))
            while True:
                defending = False
                self.recv("\nPlayer: %s\n Health %d / %d\n" % (pm.name, pm.health, pm.max_health))
                self.recv("\nEnemy: %s\n Health %d / %d\n" % (em.name, em.health, em.max_health))
                self.recv("Menu: (A)ttack, (D)efend, (C)apture, (R)etreat\n")
                # Menu choice comes from the poller's own random, not
                # the mirrored game RNG.
                r = random.randint(0, 99)
                if r < 50: #attack
                    self.send("a")
                    for i in xrange(len(pm.kind['attacks'])):
                        name, power = pm.kind['attacks'][i]
                        self.recv("%d. %s (%d)\n" % (i+1, name, power))
                    self.recv("What attack to perform?\n")
                    i = random.randint(1, len(pm.kind['attacks']))
                    self.send(str(i))
                    name, power = pm.kind['attacks'][i-1]
                    self.calc_dmg(pm, em, power)
                    if em.health == 0:
                        self.gain_xp(pm, em)
                        del enemy[0]
                        break
                elif r < 70: #defend
                    self.send("d")
                    defending = True
                    self.recv("%s takes a defensive stance.\n" % pm.name)
                elif r < 90: #capture
                    self.send("c")
                    # Capture only works on wild monsters below 10%
                    # health, and then only 70% of the time.
                    if wild and em.health * 10 < em.max_health and self.rand.randint(100) < 70:
                        self.recv("Successfully captured %s!\n" % em.name)
                        self.capture(em)
                        del enemy[0]
                        player = list(filter(lambda m: m.health > 0, self.map.player.team))
                        break
                    else:
                        self.recv("Unable to capture %s.\n" % em.name)
                else: #retreat
                    self.send("r")
                    if self.rand.randint(100) < 30:
                        self.recv("Successful retreat!\n")
                        return False
                    self.recv("Unable to retreat...\n")
                # Enemy counter-attack; defending reduces power to 70%.
                name, power = em.kind['attacks'][self.rand.randint(len(em.kind['attacks']))]
                self.calc_dmg(em, pm, power * (70 if defending else 100) / 100)
                if pm.health == 0:
                    del player[0]
                    break
        return len(enemy) == 0
    def get_level(self, x, y):
        """Wild-monster level scales with Manhattan distance from the
        respawn point (minimum 1, approaching 99 at the far corner)."""
        max_dist = abs(self.map.width - self.map.start_x) + abs(self.map.height - self.map.start_y)
        dist = abs(x - self.map.start_x) + abs(y - self.map.start_y)
        return max(1, 99 * dist / max_dist)
    def random_monster(self, level):
        """Generate a wild monster of a random kind at `level`."""
        kind_id = self.rand.randint(len(KINDS))
        return GenMonster(kind_id, level)
class A(Actions):
    def start(self):
        """Begin a session pointed at the first saved state."""
        self.state['state'] = 0
def string_recv(self, s):
# all strings are new-line delimited, so let's just split on that
for l in s.splitlines(True):
assert l[-1] == '\n'
self.read(delim='\n', expect=l)
#self.read(length=len(s), expect=s)
    def string_send(self, s):
        """Send `s` verbatim to the challenge binary."""
        self.write(s)
def doit(self):
g = Game(STATES[self.state['state']], self.string_recv, self.string_send)
g.save_game('-KEABDCDCAADFDCPPABAAKBABPPADPOABPPALPOJEABPPBCPOIJABPPAGPOACPPAFPOABABANPOIJABPPAGPOABPPAGPOABPPAGPOAGPPABPOIKABPPACPOABPPABAGALPOAEPPAEPOABPPABACABPOIDABPPACPOAIPPAEPOABPPAGPOAGPPAGPOICABPPAGPOAGPPAGPOAKPPACPOABPPADPOICABPPABPOABCBACPOAHPPAIPOAIPPAIPOIBABPPAGPOAFPPAHPOACPPAEPOADPPABPOABADAEPOIDABPPABPOAEPPACPOAEPPACPOACPPADPOACPPAMPOIEABPPAGPOABCAABPOAHPPABAHBAPOIGABPPAGPOADPPADPOACPPBAPOAEPPAEPODKPPAIPOAFPPABCFACPOACPPAGPOABAEABAFAEPOACPPACPOAEPPACPOABPPAFPOACPPADPOCPPPAKPOAFPPABPOABPPACPOAGPPAHPOABPPAIPOAEPPADPOACPPADPOCPPPACPOACPPAFPOAGPPACPOABPPACPOAKPPABPOADPPBCPOABAJADPOCLPPADPOABPPAFPOAFPPADPOABPPADPOANPPAHPOABCHAMPOABAKADPOCIPPADPOABPPABPOABPPABPOABBPACPOAGPPACPOABPPAEPOAMPPBJPOABALCHPPACPOABPPAGPOAHPPACPOABPPACPOAHPPABPOABPPABPOABCKAMPOABCGAFPOABAIABPOABPPAIPOAKPPACPOABAMBFPPACPOABPPAEPOACPPAFPOAFPPADPOABPPBNPOABCJABCIABPOAEPPADPOAOPPAEPOBCPPAJPOADPPADPOABPPABPOAEPPABPOADPPBLPOABPPACPOAFPPAFPOAKPPAHPOBDPPAFPOACPPADPOACPPAEPOAEPPCAPOAFPPAHPOAJPPACPOABPPADPOABPPABPOBBPPAHPOABPPAEPOADPPAEPOAEPPAGPOABPPBHPOAGPPACPOABPPADPOAFPPACPOADPPABPOACPPACPOACPPABPOBAPPAIPOABPPALPOADPPAEPOADPPADPOABPPAFPOADPPAJPOAMPPABPOAGPPADPOADPPACPOACPPACPOABPPABPOBAPPAIPOACPPABPOABCCACPOABCDAFPOABPPAEPOAEPPAEPOABPPAFPOACPPAEPOACPPAEPOAKPPAMPOACPPAJPOBDPPAPPOABPPADPOABCEAFPPALPOABPPAFPOACPPACPOABCLAMPPBGPOBBPPAIPOADPPAGPOALPPAPPOADPPACPOAPPPBEPOBDPPADPOAJPPAEPOAHPPBBPOBHPPACPOABBDAJPOAFPPABPOBLPPAGPOAGPPANPOACPPAFPOBFPPAOPOAEPPACPOBJPPAHPOABPPACPOADPPAOPOAGPPACPOBDPPBFPOBJPPACPOACPPAKPOABCMAMPOAEPPABPOACPPABPOBDPPACPOABBCABPOABBEBAPOBIPPBNPOABPPAGPOBCPPBFPOABBBBDPPACPOACPPAJPOACPPAEPOACPPANPOACPPAFPOBCPPBGPOBCPPAEPOABCPACPOABPPAKPOABPPABPOABPPAFPOABPPADPOADPPABPOADPPAFPOBBPPBHPOBCPPABPOACPPACPOAFPPAGPOADPPAGPOABPPAFPOAGPPABPOBFPPANPOABBAAJPOBCPPAFPOAFPPAGPOACPPAFPOAEPPAFPOAEPPACPOABPPADPOAPPPADPOABPPACPOACPPAFPOABPPAIPOACPPABPOBBPPAGPOAGPPACPOABBOACPOAEPPABPOABCOAIPPADPOACPPAIPOAPPPABPOAHPPACPOACPPAEPOADPPACPOACPPABPOBBPPAEPOAKPPACPOAFPPBLPOALPPABPOAGPPACPOANPPACPOBBPPAB
POAMPPAEPOAEPPBKPOABCNALPPABPOAGPPABPOAGPPAGPOACPPABPOBAPPACPOABDAALPPAFPOAEPPAJPOABBNABPOABBLAOPOAKPPADPOAGPPABPOAGPPAJPOAPPPADPOAHPPAJPOAEPPBDPOADPPABPOAOPPAFPOACPPADPOAHPPACPOACPPACPOABAOACPOBAPPAEPOABDBBLPOACPPACPOACPPABPOBDPPAFPOADPPAFPOABPPAEPOADPPAFPOBAPPACPOABPPAIPOABPPANPOAEPPADPOAGPPABPOANPPADPOABPPAMPOACPPABPOACPPACPOADPPAGPOBDPPAIPOABPPABBMALPOAHPPABPOAGPPADPOALPPBBPOABPPAOPOBDPPACPOABPPADPOABPPAJPOABPPAJPOABPPADPOADPPAFPOADPPABPOAHPPACPOAEPPANPOABAPALPOBJPPALPOABPPAPPOABPPAEPOACPPABPOABBIACPOACPPAIPOACPPADPOABBFAEPOABPPACPOAHPPAHPOBKPPBLPOABBKADPOAEPPAMPOACPPABPOABPPAMPOAEPPAFPOABPPABPOBMPPBEPOABPPADPOABDCAHPOABBJAKPOADPPAGPOABBGAEPOABPPABPOAFPPAJPOBKPPADPOABPPAFPOABPPAGPOAGPPABDDBAPOABBHACPOAJPPAGPOAIPPACPOABANACPPADPOABDEKFABPPAAACJEABAALEAHAFELGBHEGJGFAEAJFAGIGFGBHAHIGBGMGNAAABAAKMABKMABJLABKFABAJFAGIGFGBHAHIGBGMGNAAACAALEABLEABJMABKGABAHEFGBHDGIHEGFGMABACAALOABLOABJMABJMABAGFEGBGDGBGMGNACABAAIAACIAACKFABDNABCEKGADACJEABAAJABPAGEEHFHDHEGJGOADAHEFGBHDGIHEGFGMABBBAALGADLGADKLABKLABAHEFGBHDGIHEGFGMABBBAALGADLGADKLABKLABAHEFGBHDGIHEGFGMABBBAALGADLGADKLABKLABABCELGAEACJEABAAIABJAFECHCGFGOHEABAJFAGIGFGBHAHIGBGMGNAABBAAKMADKMADKLABLFABABCEIGABABCEIGABABCEICACABCEBOACJEABAALIEHAFECHCGFGOHEACAGFEGBGDGBGMGNACCMAAJIAHJIAHJAACKIABAHEFGBHDGIHEGFGMABCNAAJGAHJGAHIHACIHACACJEABAAKMEPAFEGGJGPGOGBACAJFAGIGFGBHAHIGBGMGNAADDAALMAHLMAHINACJHACAGFEGBGDGBGMGNACDAAALIAHLIAHJEACKMABACJEABAAIIGHAEFEGPGOHJAEAJFAGIGFGBHAHIGBGMGNAADMAAIEAJIEAJJGACKAACAJFAGIGFGBHAHIGBGMGNAADOAAJEAJJEAJJIACKCACAJFAGIGFGBHAHIGBGMGNAADMAAIEAJIEAJJGACKAACAHEFGBHDGIHEGFGMABDOAAJOAJJOAJJIACJIACACJEABAAKIIMABAFELGBHEGJGFAEAHEFGBHDGIHEGFGMABJHABAAKGAMKGAMLBACLBACAJFAGIGFGBHAHIGBGMGNAAJGABAAJEAMJEAMLAACLKACAGFEGBGDGBGMGNACJIABAALIAMLIAMLMACJEACAGFEGBGDGBGMGNACJIABAALIAMLIAMLMACJEACABCEIGAGABCEJKABACJEABAAKIHDAFEMGFHCGPHJACAGFEGBGDGBGMGNACIHABAALAAKLAAKKLACIDACAGFEGBGDGBGMGNACIJABAAIAALIAALKNACIFACABCEJOAFACJEABAALIGAAFEDGBHCGMHJADAHEFGBHDGIHEGFGMABDLAAIGAJIGAJJFACJFACAGFEGBGDGBGMGNACDKAAIIAJIIAJJOACLGABAJ
FAGIGFGBHAHIGBGMGNAADKAALEAILEAIJEACJOACACJEABAALAFNAFECHCGFGOHEACAJFAGIGFGBHAHIGBGMGNAADKAALEAILEAIJEACJOACAGFEGBGDGBGMGNACDKAAIIAJIIAJJOACLGABABCELOACACJEABAALMGOAFELGBHEGJGFACAHEFGBHDGIHEGFGMABIHABAAKGAKKGAKKBACKBACAJFAGIGFGBHAHIGBGMGNAAIEABAAIEAKIEAKJOACKIACABCEBOABCELCAFABCELKADABCELGAEABCEJKABACJEABAAKIEBAFEDGBHCGMHJAEAHEFGBHDGIHEGFGMABCGAAJOAGJOAGIAACIAACAHEFGBHDGIHEGFGMABCHAAKGAGKGAGIBACIBACAJFAGIGFGBHAHIGBGMGNAACEAAIEAGIEAGLOABIIACAGFEGBGDGBGMGNACCFAAKAAGKAAGIJACKBABABCEIGABACJEABAAIMEDAFELGBHEGJGFACAHEFGBHDGIHEGFGMABCLAAIGAHIGAHIFACIFACAGFEGBGDGBGMGNACCKAAIIAHIIAHIOACKGABACJEABAALMFFAGEEHFHDHEGJGOABAGFEGBGDGBGMGNACDIAALIAILIAIJMACLEABACJEABAAKMDGAEFEGPGOHJADAHEFGBHDGIHEGFGMABCBAALGAFLGAFLLABLLABAJFAGIGFGBHAHIGBGMGNAACDAALMAFLMAFLNABIHACAGFEGBGDGBGMGNACCDAAJAAGJAAGIHACJPABACJEABAAKACFAFELGBHEGJGFAEAHEFGBHDGIHEGFGMABBFAAJGAEJGAEKPABKPABAGFEGBGDGBGMGNACBGAAKIAEKIAELKABJCABAJFAGIGFGBHAHIGBGMGNAABEAAIEAEIEAEKOABLIABAJFAGIGFGBHAHIGBGMGNAABGAAJEAEJEAELAABLKABABCEJGACACJEABAAIAELAFECHCGFGOHEACAGFEGBGDGBGMGNACDAAALIAHLIAHJEACKMABAHEFGBHDGIHEGFGMABCOAAJOAHJOAHIIACIIACABCEKGADACJEABAAKADOAFELGBHEGJGFABAGFEGBGDGBGMGNACCEAAJIAGJIAGIIACKAABACJEABAALMCDAGEEHFHDHEGJGOABAHEFGBHDGIHEGFGMABBDAAIGAEIGAEKNABKNABABCELKADACJEABAAKMDGAFEMGFHCGPHJABAGFEGBGDGBGMGNACCBAAIAAGIAAGIFACJNABABCEICACABCEJOAFACJEABAALMCDAEFEGPGOHJACAGFEGBGDGBGMGNACBHAALAAELAAELLABJDABAHEFGBHDGIHEGFGMABBFAAJGAEJGAEKPABKPABABCELKADABCELOACACJEABAAIIEOAGFDHJGMHGGJGBABAGFEGBGDGBGMGNACDDAAJAAIJAAIJHACKPABACJEABAAIIEOAFECHCGFGOHEABAJFAGIGFGBHAHIGBGMGNAADAAAKEAHKEAHIKACJEACABCEIGABABCEIKAFACJEABAAIEHCAGEEHFHDHEGJGOAEAJFAGIGFGBHAHIGBGMGNAAIFABAAIMAKIMAKJPACKJACAJFAGIGFGBHAHIGBGMGNAAIGABAAJEAKJEAKKAACKKACAHEFGBHDGIHEGFGMABIDABAAIGAKIGAKJNACJNACAHEFGBHDGIHEGFGMABIGABAAJOAKJOAKKAACKAACACJEABAAIAELAGEEHFHDHEGJGOACAHEFGBHDGIHEGFGMABCNAAJGAHJGAHIHACIHACAJFAGIGFGBHAHIGBGMGNAACPAAJMAHJMAHIJACJDACABCEJGACADDCAACKAAABAHEFGBHDGIHEGFGMABADAAIGACIGACJNABJNABAAMNCC--')
g.random_walk()
| 57.686316 | 5,948 | 0.720923 |
import array
import random
import struct
from cStringIO import StringIO
from generator.actions import Actions, Variable
KINDS = [
{ 'type': 'earth', 'name': 'Pheapxalm', 'max_health': 100, 'attack': 90, 'defense': 100,
'attacks': [('Scratch', 20), ('Cutting Leaf', 30)] },
{ 'type': 'wind', 'name': 'Eashtel', 'max_health': 110, 'attack': 90, 'defense': 90,
'attacks': [('Scratch', 20), ('Tornado', 40)] },
{ 'type': 'fire', 'name': 'Tacalm', 'max_health': 120, 'attack': 100, 'defense': 60,
'attacks': [('Scratch', 20), ('Bonfire', 50)] },
]
STATES = ['-KEABDCDCAADFDCPPABAAKBABPPADPOABPPALPOJEABPPBCPOIJABPPAGPOACPPAFPOABABANPOIJABPPAGPOABPPAGPOABPPAGPOAGPPABPOIKABPPACPOABPPABAGALPOAEPPAEPOABPPABACABPOIDABPPACPOAIPPAEPOABPPAGPOAGPPAGPOICABPPAGPOAGPPAGPOAKPPACPOABPPADPOICABPPABPOABCBACPOAHPPAIPOAIPPAIPOIBABPPAGPOAFPPAHPOACPPAEPOADPPABPOABADAEPOIDABPPABPOAEPPACPOAEPPACPOACPPADPOACPPAMPOIEABPPAGPOABCAABPOAHPPABAHBAPOIGABPPAGPOADPPADPOACPPBAPOAEPPAEPODKPPAIPOAFPPABCFACPOACPPAGPOABAEABAFAEPOACPPACPOAEPPACPOABPPAFPOACPPADPOCPPPAKPOAFPPABPOABPPACPOAGPPAHPOABPPAIPOAEPPADPOACPPADPOCPPPACPOACPPAFPOAGPPACPOABPPACPOAKPPABPOADPPBCPOABAJADPOCLPPADPOABPPAFPOAFPPADPOABPPADPOANPPAHPOABCHAMPOABAKADPOCIPPADPOABPPABPOABPPABPOABBPACPOAGPPACPOABPPAEPOAMPPBJPOABALCHPPACPOABPPAGPOAHPPACPOABPPACPOAHPPABPOABPPABPOABCKAMPOABCGAFPOABAIABPOABPPAIPOAKPPACPOABAMBFPPACPOABPPAEPOACPPAFPOAFPPADPOABPPBNPOABCJABCIABPOAEPPADPOAOPPAEPOBCPPAJPOADPPADPOABPPABPOAEPPABPOADPPBLPOABPPACPOAFPPAFPOAKPPAHPOBDPPAFPOACPPADPOACPPAEPOAEPPCAPOAFPPAHPOAJPPACPOABPPADPOABPPABPOBBPPAHPOABPPAEPOADPPAEPOAEPPAGPOABPPBHPOAGPPACPOABPPADPOAFPPACPOADPPABPOACPPACPOACPPABPOBAPPAIPOABPPALPOADPPAEPOADPPADPOABPPAFPOADPPAJPOAMPPABPOAGPPADPOADPPACPOACPPACPOABPPABPOBAPPAIPOACPPABPOABCCACPOABCDAFPOABPPAEPOAEPPAEPOABPPAFPOACPPAEPOACPPAEPOAKPPAMPOACPPAJPOBDPPAPPOABPPADPOABCEAFPPALPOABPPAFPOACPPACPOABCLAMPPBGPOBBPPAIPOADPPAGPOALPPAPPOADPPACPOAPPPBEPOBDPPADPOAJPPAEPOAHPPBBPOBHPPACPOABBDAJPOAFPPABPOBLPPAGPOAGPPANPOACPPAFPOBFPPAOPOAEPPACPOBJPPAHPOABPPACPOADPPAOPOAGPPACPOBDPPBFPOBJPPACPOACPPAKPOABCMAMPOAEPPABPOACPPABPOBDPPACPOABBCABPOABBEBAPOBIPPBNPOABPPAGPOBCPPBFPOABBBBDPPACPOACPPAJPOACPPAEPOACPPANPOACPPAFPOBCPPBGPOBCPPAEPOABCPACPOABPPAKPOABPPABPOABPPAFPOABPPADPOADPPABPOADPPAFPOBBPPBHPOBCPPABPOACPPACPOAFPPAGPOADPPAGPOABPPAFPOAGPPABPOBFPPANPOABBAAJPOBCPPAFPOAFPPAGPOACPPAFPOAEPPAFPOAEPPACPOABPPADPOAPPPADPOABPPACPOACPPAFPOABPPAIPOACPPABPOBBPPAGPOAGPPACPOABBOACPOAEPPABPOABCOAIPPADPOACPPAIPOAPPPABPOAHPPACPOACPPAEPOADPPACPOACPPABPOBBPPAEPOAKPPACPOAFPPBLPOALPPABPOAGPPACPOANPPACPOBBPPABPO
AMPPAEPOAEPPBKPOABCNALPPABPOAGPPABPOAGPPAGPOACPPABPOBAPPACPOABDAALPPAFPOAEPPAJPOABBNABPOABBLAOPOAKPPADPOAGPPABPOAGPPAJPOAPPPADPOAHPPAJPOAEPPBDPOADPPABPOAOPPAFPOACPPADPOAHPPACPOACPPACPOABAOACPOBAPPAEPOABDBBLPOACPPACPOACPPABPOBDPPAFPOADPPAFPOABPPAEPOADPPAFPOBAPPACPOABPPAIPOABPPANPOAEPPADPOAGPPABPOANPPADPOABPPAMPOACPPABPOACPPACPOADPPAGPOBDPPAIPOABPPABBMALPOAHPPABPOAGPPADPOALPPBBPOABPPAOPOBDPPACPOABPPADPOABPPAJPOABPPAJPOABPPADPOADPPAFPOADPPABPOAHPPACPOAEPPANPOABAPALPOBJPPALPOABPPAPPOABPPAEPOACPPABPOABBIACPOACPPAIPOACPPADPOABBFAEPOABPPACPOAHPPAHPOBKPPBLPOABBKADPOAEPPAMPOACPPABPOABPPAMPOAEPPAFPOABPPABPOBMPPBEPOABPPADPOABDCAHPOABBJAKPOADPPAGPOABBGAEPOABPPABPOAFPPAJPOBKPPADPOABPPAFPOABPPAGPOAGPPABDDBAPOABBHACPOAJPPAGPOAIPPACPOABANACPPADPOABDEKFABPPAAACJEABAALEAHAFELGBHEGJGFAEAJFAGIGFGBHAHIGBGMGNAAABAAKMABKMABJLABKFABAJFAGIGFGBHAHIGBGMGNAAACAALEABLEABJMABKGABAHEFGBHDGIHEGFGMABACAALOABLOABJMABJMABAGFEGBGDGBGMGNACABAAIAACIAACKFABDNABCEKGADACJEABAAJABPAGEEHFHDHEGJGOADAHEFGBHDGIHEGFGMABBBAALGADLGADKLABKLABAHEFGBHDGIHEGFGMABBBAALGADLGADKLABKLABAHEFGBHDGIHEGFGMABBBAALGADLGADKLABKLABABCELGAEACJEABAAIABJAFECHCGFGOHEABAJFAGIGFGBHAHIGBGMGNAABBAAKMADKMADKLABLFABABCEIGABABCEIGABABCEICACABCEBOACJEABAALIEHAFECHCGFGOHEACAGFEGBGDGBGMGNACCMAAJIAHJIAHJAACKIABAHEFGBHDGIHEGFGMABCNAAJGAHJGAHIHACIHACACJEABAAKMEPAFEGGJGPGOGBACAJFAGIGFGBHAHIGBGMGNAADDAALMAHLMAHINACJHACAGFEGBGDGBGMGNACDAAALIAHLIAHJEACKMABACJEABAAIIGHAEFEGPGOHJAEAJFAGIGFGBHAHIGBGMGNAADMAAIEAJIEAJJGACKAACAJFAGIGFGBHAHIGBGMGNAADOAAJEAJJEAJJIACKCACAJFAGIGFGBHAHIGBGMGNAADMAAIEAJIEAJJGACKAACAHEFGBHDGIHEGFGMABDOAAJOAJJOAJJIACJIACACJEABAAKIIMABAFELGBHEGJGFAEAHEFGBHDGIHEGFGMABJHABAAKGAMKGAMLBACLBACAJFAGIGFGBHAHIGBGMGNAAJGABAAJEAMJEAMLAACLKACAGFEGBGDGBGMGNACJIABAALIAMLIAMLMACJEACAGFEGBGDGBGMGNACJIABAALIAMLIAMLMACJEACABCEIGAGABCEJKABACJEABAAKIHDAFEMGFHCGPHJACAGFEGBGDGBGMGNACIHABAALAAKLAAKKLACIDACAGFEGBGDGBGMGNACIJABAAIAALIAALKNACIFACABCEJOAFACJEABAALIGAAFEDGBHCGMHJADAHEFGBHDGIHEGFGMABDLAAIGAJIGAJJFACJFACAGFEGBGDGBGMGNACDKAAIIAJIIAJJOACLGABAJFA
GIGFGBHAHIGBGMGNAADKAALEAILEAIJEACJOACACJEABAALAFNAFECHCGFGOHEACAJFAGIGFGBHAHIGBGMGNAADKAALEAILEAIJEACJOACAGFEGBGDGBGMGNACDKAAIIAJIIAJJOACLGABABCELOACACJEABAALMGOAFELGBHEGJGFACAHEFGBHDGIHEGFGMABIHABAAKGAKKGAKKBACKBACAJFAGIGFGBHAHIGBGMGNAAIEABAAIEAKIEAKJOACKIACABCEBOABCELCAFABCELKADABCELGAEABCEJKABACJEABAAKIEBAFEDGBHCGMHJAEAHEFGBHDGIHEGFGMABCGAAJOAGJOAGIAACIAACAHEFGBHDGIHEGFGMABCHAAKGAGKGAGIBACIBACAJFAGIGFGBHAHIGBGMGNAACEAAIEAGIEAGLOABIIACAGFEGBGDGBGMGNACCFAAKAAGKAAGIJACKBABABCEIGABACJEABAAIMEDAFELGBHEGJGFACAHEFGBHDGIHEGFGMABCLAAIGAHIGAHIFACIFACAGFEGBGDGBGMGNACCKAAIIAHIIAHIOACKGABACJEABAALMFFAGEEHFHDHEGJGOABAGFEGBGDGBGMGNACDIAALIAILIAIJMACLEABACJEABAAKMDGAEFEGPGOHJADAHEFGBHDGIHEGFGMABCBAALGAFLGAFLLABLLABAJFAGIGFGBHAHIGBGMGNAACDAALMAFLMAFLNABIHACAGFEGBGDGBGMGNACCDAAJAAGJAAGIHACJPABACJEABAAKACFAFELGBHEGJGFAEAHEFGBHDGIHEGFGMABBFAAJGAEJGAEKPABKPABAGFEGBGDGBGMGNACBGAAKIAEKIAELKABJCABAJFAGIGFGBHAHIGBGMGNAABEAAIEAEIEAEKOABLIABAJFAGIGFGBHAHIGBGMGNAABGAAJEAEJEAELAABLKABABCEJGACACJEABAAIAELAFECHCGFGOHEACAGFEGBGDGBGMGNACDAAALIAHLIAHJEACKMABAHEFGBHDGIHEGFGMABCOAAJOAHJOAHIIACIIACABCEKGADACJEABAAKADOAFELGBHEGJGFABAGFEGBGDGBGMGNACCEAAJIAGJIAGIIACKAABACJEABAALMCDAGEEHFHDHEGJGOABAHEFGBHDGIHEGFGMABBDAAIGAEIGAEKNABKNABABCELKADACJEABAAKMDGAFEMGFHCGPHJABAGFEGBGDGBGMGNACCBAAIAAGIAAGIFACJNABABCEICACABCEJOAFACJEABAALMCDAEFEGPGOHJACAGFEGBGDGBGMGNACBHAALAAELAAELLABJDABAHEFGBHDGIHEGFGMABBFAAJGAEJGAEKPABKPABABCELKADABCELOACACJEABAAIIEOAGFDHJGMHGGJGBABAGFEGBGDGBGMGNACDDAAJAAIJAAIJHACKPABACJEABAAIIEOAFECHCGFGOHEABAJFAGIGFGBHAHIGBGMGNAADAAAKEAHKEAHIKACJEACABCEIGABABCEIKAFACJEABAAIEHCAGEEHFHDHEGJGOAEAJFAGIGFGBHAHIGBGMGNAAIFABAAIMAKIMAKJPACKJACAJFAGIGFGBHAHIGBGMGNAAIGABAAJEAKJEAKKAACKKACAHEFGBHDGIHEGFGMABIDABAAIGAKIGAKJNACJNACAHEFGBHDGIHEGFGMABIGABAAJOAKJOAKKAACKAACACJEABAAIAELAGEEHFHDHEGJGOACAHEFGBHDGIHEGFGMABCNAAJGAHJGAHIHACIHACAJFAGIGFGBHAHIGBGMGNAACPAAJMAHJMAHIJACJDACABCEJGACADDCAACKAAABAHEFGBHDGIHEGFGMABADAAIGACIGACJNABJNABAAMNCC--']
def dearmor(s):
s = s[1:-2]
out = StringIO()
for i in xrange(0, len(s), 2):
b = (ord(s[i]) - ord('A')) & 0xf
b = (b << 4) | ((ord(s[i+1]) - ord('A')) & 0xf)
out.write(chr(b))
return out.getvalue()[:-2]
class Deserializer(object):
def __init__(self, f):
if isinstance(f, str):
f = StringIO(f)
self.f = f
def u8(self):
return struct.unpack('<B', self.f.read(1))[0]
def u16(self):
return struct.unpack('<H', self.f.read(2))[0]
def u32(self):
return struct.unpack('<I', self.f.read(4))[0]
def number(self):
b = self.u8()
neg = b & 0x40
val = b & 0x3F
shift = 6
while b & 0x80:
b = self.u8()
val |= (b & 0x7f) << shift
if neg:
val = -val
return val
def string(self):
count = self.number()
return self.f.read(count)
class Monster(object):
def __init__(self, d):
self.name = d.string()
self.kind_id = d.number()
self.kind = KINDS[self.kind_id]
self.level = d.number()
self.xp = d.number()
self.health = d.number()
self.max_health = d.number()
self.attack = d.number()
self.defense = d.number()
class GenMonster(object):
def __init__(self, kind_id, level):
self.level = level
self.kind_id = kind_id
self.kind = KINDS[kind_id]
self.name = self.kind['name']
self.max_health = self.kind['max_health'] + level * 8
self.attack = self.kind['attack'] + level * 1
self.defense = self.kind['defense'] + level * 1
self.xp = 0
self.health = self.max_health
class Vet(object):
def __init__(self, d):
self.icon = 'V'
class Trainer(object):
def __init__(self, d):
self.icon = chr(d.number())
self.player_won = bool(d.number())
self.reward = d.number()
self.name = d.string()
self.team = []
for i in xrange(d.number()):
self.team.append(Monster(d))
class Treasure(object):
def __init__(self, d):
self.icon = chr(d.number())
self.avail = self.icon == '$'
self.value = d.number()
class NameChanger(object):
def __init__(self, d):
self.icon = 'N'
class Empty(object):
def __init__(self):
self.icon = ' '
class Wall(object):
def __init__(self):
self.icon = '#'
class Player(object):
def __init__(self, d):
self.x = d.number()
self.y = d.number()
self.icon = chr(d.u8())
self.money = d.number()
self.team = []
for i in xrange(d.number()):
self.team.append(Monster(d))
self.bank = []
for i in xrange(d.number()):
self.bank.append(Monster(d))
class Map(object):
def __init__(self, d):
self.width = d.number()
self.height = d.number()
self.start_x = d.number()
self.start_y = d.number()
self.next_id = d.number()
self.tiles = array.array('B')
i = 0
while i < self.width * self.height:
count = d.number()
if count < 0:
raise Exception('Bad RLE count')
tile = d.u8()
for j in xrange(count):
self.tiles.append(tile)
i += count
if i != self.width * self.height:
raise Exception('Bad RLE')
self.objects = {}
for i in xrange(self.next_id):
self.objects[i] = self.decode_mapobject(d)
self.objects[0xFE] = Empty()
self.objects[0xFF] = Wall()
self.player = Player(d)
def decode_mapobject(self, d):
type_ = d.number()
if type_ == 0:
return Vet(d)
elif type_ == 1:
return Treasure(d)
elif type_ == 2:
return Trainer(d)
elif type_ == 3:
return NameChanger(d)
else:
raise Exception('Bad map object type (%d)' % type_)
def get_tile(self, x, y):
return self.objects[self.tiles[x + y * self.width]]
def display(self):
out = StringIO()
out.write('\x1B[2J')
out.write('Money: %6d\n' % self.player.money)
for y in xrange(self.height):
for x in xrange(self.width):
if (x, y) == (self.player.x, self.player.y):
out.write('*')
else:
out.write(self.get_tile(x, y).icon)
out.write('\n')
return out.getvalue()
class Random(object):
def __init__(self, state0, state1):
self.state0 = 168642907
self.state1 = 182064376
def _update(self):
if self.state0 & 1:
self.state0 ^= 0x29D1E9EB << 1
self.state0 >>= 1
if self.state1 & 1:
self.state1 ^= 0xF5CDE95 << 1
self.state1 >>= 1
def _rand(self):
self._update()
return (self.state0 ^ self.state1) & 0xFFFF
def randint(self, upper):
r = self._rand()
while r > 0x10000 / upper * upper - 1:
r = self._rand()
return r % upper
class Game(object):
def __init__(self, state_string, recv, send):
self.MAX_STEPS = 600 v
self.send = send
self.map = Map(Deserializer(dearmor(state_string)))
self.rand = Random(168642907, 182064376)
def current(self):
return (self.map.player.x, self.map.player.y)
def calc_weight(self, p):
x, y, d = p
if (x, y) not in self.visited:
return 5.0
dt = min(self.time - self.visited[(x, y)], 40)
return dt / 10.0
def next_step(self):
OPTIONS = [(1, 0, 'd'), (-1, 0, 'a'), (0, 1, 's'), (0, -1, 'w')]
self.visited[self.current()] = self.time
self.time += 1
options = map(lambda p: (p[0] + self.map.player.x, p[1] + self.map.player.y, p[2]), OPTIONS)
options = filter(lambda p: 0 <= p[0] < self.map.width, options)
options = filter(lambda p: 0 <= p[1] < self.map.height, options)
options = filter(lambda p: not isinstance(self.map.get_tile(p[0], p[1]), Wall), options)
options = map(lambda p: (p, self.calc_weight(p)), options)
total = sum(weight for p, weight in options)
r = random.uniform(0, total)
upto = 0
for p, weight in options:
if upto + weight > r:
return p
upto += weight
def handle_vet(self):
self.recv("Welcome to Vet Center! How can we help you?\n")
while True:
self.recv("Menu: (G)ood-bye, (H)eal, (S)tore a Monster, (W)ithdraw a Monster\n")
self.send('g')
break
def handle_treasure(self, t):
if t.avail:
self.map.player.money += t.value
t.avail = False
t.icon = ' '
def handle_trainer(self, t):
if t.player_won:
return
self.recv("New challenger! Trainer %s prepares to battle!\n" % t.name)
t.player_won = self.battle(t.team)
if t.player_won:
self.map.player.money += t.reward
t.icon = ' '
def handle_wild(self):
level = self.get_level(*self.current())
m = self.random_monster(level)
self.recv("A wild %s attacks!\n" % m.name)
self.battle([m], wild=True)
def handle(self, tile):
if isinstance(tile, Empty):
return True
elif isinstance(tile, Wall):
return False
elif isinstance(tile, Vet):
self.handle_vet()
return False
elif isinstance(tile, Treasure):
self.handle_treasure(tile)
return True
elif isinstance(tile, Trainer):
self.handle_trainer(tile)
return tile.player_won
else:
raise Exception('Bad tile type')
def handle_death(self):
alive = list(filter(lambda m: m.health > 0, self.map.player.team))
if len(alive):
return
self.map.player.money /= 2
self.map.player.x, self.map.player.y = self.map.start_x, self.map.start_y
for m in self.map.player.team:
m.health = m.max_health
def random_walk(self):
self.time = 1
self.visited = {}
steps = 0
self.recv(self.map.display())
while steps < self.MAX_STEPS:
steps += 1
x, y, direction = self.next_step()
self.send(direction)
new_tile = self.map.get_tile(x, y)
if self.handle(new_tile):
self.map.player.x = x
self.map.player.y = y
if self.rand.randint(10) == 0:
self.handle_wild()
self.handle_death()
self.recv(self.map.display())
def save_game(self, state):
self.recv(self.map.display())
self.send('1')
self.recv('%s\nPress (C) to continue...\n' % state)
self.send('c')
def calc_dmg(self, attacker, defender, power):
def multiplier(a, d):
COMBOS = {
('earth', 'wind'): 200,
('wind', 'fire'): 200,
('fire', 'earth'): 200,
('wind', 'earth'): 50,
('fire', 'wind'): 50,
('earth', 'fire'): 50,
('earth', 'earth'): 100,
('wind', 'wind'): 100,
('fire', 'fire'): 100
}
return COMBOS[(a, d)]
dmg = attacker.attack - defender.defense + power * multiplier(attacker.kind['type'], defender.kind['type']) / 100
dmg = dmg * self.rand.randint(110) / 100
if dmg > 0:
defender.health = max(0, defender.health - dmg)
def capture(self, m):
if len(self.map.player.team) < 6:
self.map.player.team.append(m)
elif len(self.map.player.bank) < 99:
self.map.player.bank.append(m)
else:
self.recv("No space on team or in bank. %s was abandoned.\n" % m.name)
def gain_xp(self, pm, em):
xp = em.level * 100
pm.xp += xp
to_level_up = 100 * pm.level * pm.level / 4
if pm.xp > to_level_up:
pm.xp -= to_level_up
if pm.level < 99:
pm.level += 1
bonus = self.rand.randint(299)
pm.max_health += (800 + bonus) / 100
pm.attack += (100 + bonus) / 100
pm.defense += (100 + bonus) / 100
pm.health = pm.max_health
    def battle(self, enemy, wild=False):
        """Fight *enemy* (an iterable of monsters) with the player's team.

        Returns True when every enemy monster is defeated, False when the
        player successfully retreats.  When *wild* is True a sufficiently
        weakened enemy may be captured.  NOTE(review): the player's menu
        choice uses the module-level `random`, while damage/capture/
        retreat rolls use `self.rand` — the two streams are independent.
        """
        player = list(filter(lambda m: m.health > 0, self.map.player.team))
        enemy = list(enemy)
        # Enemies always enter battle at full health.
        for m in enemy:
            m.health = m.max_health
        # Fight until one side has no monster left standing.
        while len(player) and len(enemy):
            pm, em = player[0], enemy[0]
            self.recv("%s (Lvl %d) versus %s (Lvl %d)\n" % (pm.name, pm.level, em.name, em.level))
            while True:
                defending = False
                self.recv("\nPlayer: %s\n Health %d / %d\n" % (pm.name, pm.health, pm.max_health))
                self.recv("\nEnemy: %s\n Health %d / %d\n" % (em.name, em.health, em.max_health))
                self.recv("Menu: (A)ttack, (D)efend, (C)apture, (R)etreat\n")
                # Player action chosen at random: 50% attack, 20% defend,
                # 20% capture attempt, 10% retreat attempt.
                r = random.randint(0, 99)
                if r < 50:
                    self.send("a")
                    for i in xrange(len(pm.kind['attacks'])):
                        name, power = pm.kind['attacks'][i]
                        self.recv("%d. %s (%d)\n" % (i+1, name, power))
                    self.recv("What attack to perform?\n")
                    # Attacks are presented 1-based; pick one uniformly.
                    i = random.randint(1, len(pm.kind['attacks']))
                    self.send(str(i))
                    name, power = pm.kind['attacks'][i-1]
                    self.calc_dmg(pm, em, power)
                    if em.health == 0:
                        self.gain_xp(pm, em)
                        del enemy[0]
                        break
                elif r < 70:
                    self.send("d")
                    # Defending reduces the incoming attack power to 70%
                    # on the enemy's turn below.
                    defending = True
                    self.recv("%s takes a defensive stance.\n" % pm.name)
                elif r < 90:
                    self.send("c")
                    # Capture only works on wild monsters below 10% of
                    # max health, and then with a 70/100 roll.
                    if wild and em.health * 10 < em.max_health and self.rand.randint(100) < 70:
                        self.recv("Successfully captured %s!\n" % em.name)
                        self.capture(em)
                        del enemy[0]
                        # capture() may change the team; recompute the
                        # alive list before the next matchup.
                        player = list(filter(lambda m: m.health > 0, self.map.player.team))
                        break
                    else:
                        self.recv("Unable to capture %s.\n" % em.name)
                else:
                    self.send("r")
                    # 30/100 chance to escape the whole battle.
                    if self.rand.randint(100) < 30:
                        self.recv("Successful retreat!\n")
                        return False
                    self.recv("Unable to retreat...\n")
                # Enemy turn: random attack, weakened if we defended.
                name, power = em.kind['attacks'][self.rand.randint(len(em.kind['attacks']))]
                self.calc_dmg(em, pm, power * (70 if defending else 100) / 100)
                if pm.health == 0:
                    del player[0]
                    break
        return len(enemy) == 0
def get_level(self, x, y):
max_dist = abs(self.map.width - self.map.start_x) + abs(self.map.height - self.map.start_y)
dist = abs(x - self.map.start_x) + abs(y - self.map.start_y)
return max(1, 99 * dist / max_dist)
def random_monster(self, level):
kind_id = self.rand.randint(len(KINDS))
return GenMonster(kind_id, level)
class A(Actions):
    def start(self):
        # Initialize the persisted state index; doit() uses it to pick
        # an entry from STATES.
        self.state['state'] = 0
def string_recv(self, s):
for l in s.splitlines(True):
assert l[-1] == '\n'
self.read(delim='\n', expect=l)
#self.read(length=len(s), expect=s)
    def string_send(self, s):
        """Send the raw string *s* to the remote process."""
        self.write(s)
def doit(self):
g = Game(STATES[self.state['state']], self.string_recv, self.string_send)
g.save_game('-KEABDCDCAADFDCPPABAAKBABPPADPOABPPALPOJEABPPBCPOIJABPPAGPOACPPAFPOABABANPOIJABPPAGPOABPPAGPOABPPAGPOAGPPABPOIKABPPACPOABPPABAGALPOAEPPAEPOABPPABACABPOIDABPPACPOAIPPAEPOABPPAGPOAGPPAGPOICABPPAGPOAGPPAGPOAKPPACPOABPPADPOICABPPABPOABCBACPOAHPPAIPOAIPPAIPOIBABPPAGPOAFPPAHPOACPPAEPOADPPABPOABADAEPOIDABPPABPOAEPPACPOAEPPACPOACPPADPOACPPAMPOIEABPPAGPOABCAABPOAHPPABAHBAPOIGABPPAGPOADPPADPOACPPBAPOAEPPAEPODKPPAIPOAFPPABCFACPOACPPAGPOABAEABAFAEPOACPPACPOAEPPACPOABPPAFPOACPPADPOCPPPAKPOAFPPABPOABPPACPOAGPPAHPOABPPAIPOAEPPADPOACPPADPOCPPPACPOACPPAFPOAGPPACPOABPPACPOAKPPABPOADPPBCPOABAJADPOCLPPADPOABPPAFPOAFPPADPOABPPADPOANPPAHPOABCHAMPOABAKADPOCIPPADPOABPPABPOABPPABPOABBPACPOAGPPACPOABPPAEPOAMPPBJPOABALCHPPACPOABPPAGPOAHPPACPOABPPACPOAHPPABPOABPPABPOABCKAMPOABCGAFPOABAIABPOABPPAIPOAKPPACPOABAMBFPPACPOABPPAEPOACPPAFPOAFPPADPOABPPBNPOABCJABCIABPOAEPPADPOAOPPAEPOBCPPAJPOADPPADPOABPPABPOAEPPABPOADPPBLPOABPPACPOAFPPAFPOAKPPAHPOBDPPAFPOACPPADPOACPPAEPOAEPPCAPOAFPPAHPOAJPPACPOABPPADPOABPPABPOBBPPAHPOABPPAEPOADPPAEPOAEPPAGPOABPPBHPOAGPPACPOABPPADPOAFPPACPOADPPABPOACPPACPOACPPABPOBAPPAIPOABPPALPOADPPAEPOADPPADPOABPPAFPOADPPAJPOAMPPABPOAGPPADPOADPPACPOACPPACPOABPPABPOBAPPAIPOACPPABPOABCCACPOABCDAFPOABPPAEPOAEPPAEPOABPPAFPOACPPAEPOACPPAEPOAKPPAMPOACPPAJPOBDPPAPPOABPPADPOABCEAFPPALPOABPPAFPOACPPACPOABCLAMPPBGPOBBPPAIPOADPPAGPOALPPAPPOADPPACPOAPPPBEPOBDPPADPOAJPPAEPOAHPPBBPOBHPPACPOABBDAJPOAFPPABPOBLPPAGPOAGPPANPOACPPAFPOBFPPAOPOAEPPACPOBJPPAHPOABPPACPOADPPAOPOAGPPACPOBDPPBFPOBJPPACPOACPPAKPOABCMAMPOAEPPABPOACPPABPOBDPPACPOABBCABPOABBEBAPOBIPPBNPOABPPAGPOBCPPBFPOABBBBDPPACPOACPPAJPOACPPAEPOACPPANPOACPPAFPOBCPPBGPOBCPPAEPOABCPACPOABPPAKPOABPPABPOABPPAFPOABPPADPOADPPABPOADPPAFPOBBPPBHPOBCPPABPOACPPACPOAFPPAGPOADPPAGPOABPPAFPOAGPPABPOBFPPANPOABBAAJPOBCPPAFPOAFPPAGPOACPPAFPOAEPPAFPOAEPPACPOABPPADPOAPPPADPOABPPACPOACPPAFPOABPPAIPOACPPABPOBBPPAGPOAGPPACPOABBOACPOAEPPABPOABCOAIPPADPOACPPAIPOAPPPABPOAHPPACPOACPPAEPOADPPACPOACPPABPOBBPPAEPOAKPPACPOAFPPBLPOALPPABPOAGPPACPOANPPACPOBBPPAB
POAMPPAEPOAEPPBKPOABCNALPPABPOAGPPABPOAGPPAGPOACPPABPOBAPPACPOABDAALPPAFPOAEPPAJPOABBNABPOABBLAOPOAKPPADPOAGPPABPOAGPPAJPOAPPPADPOAHPPAJPOAEPPBDPOADPPABPOAOPPAFPOACPPADPOAHPPACPOACPPACPOABAOACPOBAPPAEPOABDBBLPOACPPACPOACPPABPOBDPPAFPOADPPAFPOABPPAEPOADPPAFPOBAPPACPOABPPAIPOABPPANPOAEPPADPOAGPPABPOANPPADPOABPPAMPOACPPABPOACPPACPOADPPAGPOBDPPAIPOABPPABBMALPOAHPPABPOAGPPADPOALPPBBPOABPPAOPOBDPPACPOABPPADPOABPPAJPOABPPAJPOABPPADPOADPPAFPOADPPABPOAHPPACPOAEPPANPOABAPALPOBJPPALPOABPPAPPOABPPAEPOACPPABPOABBIACPOACPPAIPOACPPADPOABBFAEPOABPPACPOAHPPAHPOBKPPBLPOABBKADPOAEPPAMPOACPPABPOABPPAMPOAEPPAFPOABPPABPOBMPPBEPOABPPADPOABDCAHPOABBJAKPOADPPAGPOABBGAEPOABPPABPOAFPPAJPOBKPPADPOABPPAFPOABPPAGPOAGPPABDDBAPOABBHACPOAJPPAGPOAIPPACPOABANACPPADPOABDEKFABPPAAACJEABAALEAHAFELGBHEGJGFAEAJFAGIGFGBHAHIGBGMGNAAABAAKMABKMABJLABKFABAJFAGIGFGBHAHIGBGMGNAAACAALEABLEABJMABKGABAHEFGBHDGIHEGFGMABACAALOABLOABJMABJMABAGFEGBGDGBGMGNACABAAIAACIAACKFABDNABCEKGADACJEABAAJABPAGEEHFHDHEGJGOADAHEFGBHDGIHEGFGMABBBAALGADLGADKLABKLABAHEFGBHDGIHEGFGMABBBAALGADLGADKLABKLABAHEFGBHDGIHEGFGMABBBAALGADLGADKLABKLABABCELGAEACJEABAAIABJAFECHCGFGOHEABAJFAGIGFGBHAHIGBGMGNAABBAAKMADKMADKLABLFABABCEIGABABCEIGABABCEICACABCEBOACJEABAALIEHAFECHCGFGOHEACAGFEGBGDGBGMGNACCMAAJIAHJIAHJAACKIABAHEFGBHDGIHEGFGMABCNAAJGAHJGAHIHACIHACACJEABAAKMEPAFEGGJGPGOGBACAJFAGIGFGBHAHIGBGMGNAADDAALMAHLMAHINACJHACAGFEGBGDGBGMGNACDAAALIAHLIAHJEACKMABACJEABAAIIGHAEFEGPGOHJAEAJFAGIGFGBHAHIGBGMGNAADMAAIEAJIEAJJGACKAACAJFAGIGFGBHAHIGBGMGNAADOAAJEAJJEAJJIACKCACAJFAGIGFGBHAHIGBGMGNAADMAAIEAJIEAJJGACKAACAHEFGBHDGIHEGFGMABDOAAJOAJJOAJJIACJIACACJEABAAKIIMABAFELGBHEGJGFAEAHEFGBHDGIHEGFGMABJHABAAKGAMKGAMLBACLBACAJFAGIGFGBHAHIGBGMGNAAJGABAAJEAMJEAMLAACLKACAGFEGBGDGBGMGNACJIABAALIAMLIAMLMACJEACAGFEGBGDGBGMGNACJIABAALIAMLIAMLMACJEACABCEIGAGABCEJKABACJEABAAKIHDAFEMGFHCGPHJACAGFEGBGDGBGMGNACIHABAALAAKLAAKKLACIDACAGFEGBGDGBGMGNACIJABAAIAALIAALKNACIFACABCEJOAFACJEABAALIGAAFEDGBHCGMHJADAHEFGBHDGIHEGFGMABDLAAIGAJIGAJJFACJFACAGFEGBGDGBGMGNACDKAAIIAJIIAJJOACLGABAJ
FAGIGFGBHAHIGBGMGNAADKAALEAILEAIJEACJOACACJEABAALAFNAFECHCGFGOHEACAJFAGIGFGBHAHIGBGMGNAADKAALEAILEAIJEACJOACAGFEGBGDGBGMGNACDKAAIIAJIIAJJOACLGABABCELOACACJEABAALMGOAFELGBHEGJGFACAHEFGBHDGIHEGFGMABIHABAAKGAKKGAKKBACKBACAJFAGIGFGBHAHIGBGMGNAAIEABAAIEAKIEAKJOACKIACABCEBOABCELCAFABCELKADABCELGAEABCEJKABACJEABAAKIEBAFEDGBHCGMHJAEAHEFGBHDGIHEGFGMABCGAAJOAGJOAGIAACIAACAHEFGBHDGIHEGFGMABCHAAKGAGKGAGIBACIBACAJFAGIGFGBHAHIGBGMGNAACEAAIEAGIEAGLOABIIACAGFEGBGDGBGMGNACCFAAKAAGKAAGIJACKBABABCEIGABACJEABAAIMEDAFELGBHEGJGFACAHEFGBHDGIHEGFGMABCLAAIGAHIGAHIFACIFACAGFEGBGDGBGMGNACCKAAIIAHIIAHIOACKGABACJEABAALMFFAGEEHFHDHEGJGOABAGFEGBGDGBGMGNACDIAALIAILIAIJMACLEABACJEABAAKMDGAEFEGPGOHJADAHEFGBHDGIHEGFGMABCBAALGAFLGAFLLABLLABAJFAGIGFGBHAHIGBGMGNAACDAALMAFLMAFLNABIHACAGFEGBGDGBGMGNACCDAAJAAGJAAGIHACJPABACJEABAAKACFAFELGBHEGJGFAEAHEFGBHDGIHEGFGMABBFAAJGAEJGAEKPABKPABAGFEGBGDGBGMGNACBGAAKIAEKIAELKABJCABAJFAGIGFGBHAHIGBGMGNAABEAAIEAEIEAEKOABLIABAJFAGIGFGBHAHIGBGMGNAABGAAJEAEJEAELAABLKABABCEJGACACJEABAAIAELAFECHCGFGOHEACAGFEGBGDGBGMGNACDAAALIAHLIAHJEACKMABAHEFGBHDGIHEGFGMABCOAAJOAHJOAHIIACIIACABCEKGADACJEABAAKADOAFELGBHEGJGFABAGFEGBGDGBGMGNACCEAAJIAGJIAGIIACKAABACJEABAALMCDAGEEHFHDHEGJGOABAHEFGBHDGIHEGFGMABBDAAIGAEIGAEKNABKNABABCELKADACJEABAAKMDGAFEMGFHCGPHJABAGFEGBGDGBGMGNACCBAAIAAGIAAGIFACJNABABCEICACABCEJOAFACJEABAALMCDAEFEGPGOHJACAGFEGBGDGBGMGNACBHAALAAELAAELLABJDABAHEFGBHDGIHEGFGMABBFAAJGAEJGAEKPABKPABABCELKADABCELOACACJEABAAIIEOAGFDHJGMHGGJGBABAGFEGBGDGBGMGNACDDAAJAAIJAAIJHACKPABACJEABAAIIEOAFECHCGFGOHEABAJFAGIGFGBHAHIGBGMGNAADAAAKEAHKEAHIKACJEACABCEIGABABCEIKAFACJEABAAIEHCAGEEHFHDHEGJGOAEAJFAGIGFGBHAHIGBGMGNAAIFABAAIMAKIMAKJPACKJACAJFAGIGFGBHAHIGBGMGNAAIGABAAJEAKJEAKKAACKKACAHEFGBHDGIHEGFGMABIDABAAIGAKIGAKJNACJNACAHEFGBHDGIHEGFGMABIGABAAJOAKJOAKKAACKAACACJEABAAIAELAGEEHFHDHEGJGOACAHEFGBHDGIHEGFGMABCNAAJGAHJGAHIHACIHACAJFAGIGFGBHAHIGBGMGNAACPAAJMAHJMAHIJACJDACABCEJGACADDCAACKAAABAHEFGBHDGIHEGFGMABADAAIGACIGACJNABJNABAAMNCC--')
g.random_walk()
| true | true |
f71ebcebcd16f7a9e6abc4c430836637c223d9c7 | 3,226 | py | Python | tests/test_missing_impute.py | UBC-MDS/eazieda | 214f3907e71ddbaa1b64c7a201cb0f07661263ac | [
"MIT"
] | 1 | 2021-03-12T16:19:58.000Z | 2021-03-12T16:19:58.000Z | tests/test_missing_impute.py | UBC-MDS/eazieda | 214f3907e71ddbaa1b64c7a201cb0f07661263ac | [
"MIT"
] | 44 | 2021-02-26T19:15:19.000Z | 2021-03-20T00:07:51.000Z | tests/test_missing_impute.py | UBC-MDS/eazieda | 214f3907e71ddbaa1b64c7a201cb0f07661263ac | [
"MIT"
] | 5 | 2021-03-07T03:17:58.000Z | 2022-01-28T18:26:38.000Z | from eazieda.missing_impute import missing_impute
import pandas as pd
import numpy as np
from pytest import raises, fixture
@fixture
def df_miss():
    """4-row frame: one missing numeric value and one missing string."""
    data = {
        "a": [1.0, np.nan, 2.0, 3.0],
        "b": ["x", "y", np.nan, "y"],
    }
    return pd.DataFrame(data)
@fixture
def df_miss_2():
    """5-row variant of df_miss with one extra complete row."""
    data = {
        "a": [1.0, np.nan, 2.0, 3.0, 4.0],
        "b": ["x", "y", np.nan, "y", "y"],
    }
    return pd.DataFrame(data)
def test_missing_impute(df_miss, df_miss_2):
    """Exercise missing_impute for every method combination and check
    that invalid arguments raise the documented exceptions."""
    # Test with default arguments
    expected_output_default = pd.DataFrame(
        data={"a": [1.0, 2.0, 2.0, 3.0], "b": ["x", "y", "y", "y"]}
    ).reset_index(drop=True)
    missing_output_default = missing_impute(df_miss)
    assert pd.DataFrame.equals(missing_output_default, expected_output_default)
    # Test with two drop arguments selected at the same time
    expected_output_two_drop = pd.DataFrame(
        data={"a": [1.0, 3.0], "b": ["x", "y"]}
    ).reset_index(drop=True)
    missing_output_two_drop = missing_impute(
        df_miss, method_num="drop", method_non_num="drop"
    )
    assert pd.DataFrame.equals(
        missing_output_two_drop, expected_output_two_drop
    )
    # Test with method_num="mean", method_non_num="drop"
    expected_output_one_drop = pd.DataFrame(
        data={"a": [1.0, 2.0, 3.0], "b": ["x", "y", "y"]}
    ).reset_index(drop=True)
    missing_output_one_drop = missing_impute(df_miss, method_non_num="drop")
    assert pd.DataFrame.equals(
        expected_output_one_drop, missing_output_one_drop
    )
    # Test with method_num="median", method_non_num="most_frequent"
    expected_output_median = pd.DataFrame(
        data={"a": [1.0, 2.0, 2.0, 3.0], "b": ["x", "y", "y", "y"]}
    ).reset_index(drop=True)
    missing_output_median = missing_impute(df_miss, method_num="median")
    assert pd.DataFrame.equals(missing_output_median, expected_output_median)
    # Test with method_num="median", method_non_num="drop"
    expected_output_median_drop = pd.DataFrame(
        data={"a": [1.0, 2.0, 3.0], "b": ["x", "y", "y"]}
    ).reset_index(drop=True)
    missing_output_median_drop = missing_impute(
        df_miss, method_num="median", method_non_num="drop"
    )
    assert pd.DataFrame.equals(
        missing_output_median_drop, expected_output_median_drop
    )
    # Test with method_num="drop", method_non_num="most_frequent"
    # (uses the 5-row fixture so a most-frequent value exists after drop)
    expected_output_drop_freq = pd.DataFrame(
        [[1.0, "x"], [2.0, "y"], [3.0, "y"], [4.0, "y"]], columns=["a", "b"],
    ).reset_index(drop=True)
    missing_output_drop_freq = missing_impute(
        df_miss_2, method_num="drop", method_non_num="most_frequent"
    )
    assert pd.DataFrame.equals(
        missing_output_drop_freq, expected_output_drop_freq
    )
    # Test whether a non-DataFrame input raises TypeError
    with raises(TypeError):
        missing_impute(5)
    # Test whether invalid input of method_num raises ValueError
    with raises(ValueError):
        missing_impute(df_miss, method_num="mea")
    # Test whether invalid input of method_non_num raises ValueError
    with raises(ValueError):
        missing_impute(df_miss, method_num="mean", method_non_num="most_freq")
| 31.320388 | 79 | 0.651271 | from eazieda.missing_impute import missing_impute
import pandas as pd
import numpy as np
from pytest import raises, fixture
@fixture
def df_miss():
df = pd.DataFrame(
[[1.0, "x"], [np.nan, "y"], [2.0, np.nan], [3.0, "y"]],
columns=["a", "b"],
)
return df
@fixture
def df_miss_2():
df = pd.DataFrame(
[[1.0, "x"], [np.nan, "y"], [2.0, np.nan], [3.0, "y"], [4.0, "y"]],
columns=["a", "b"],
)
return df
def test_missing_impute(df_miss, df_miss_2):
expected_output_default = pd.DataFrame(
data={"a": [1.0, 2.0, 2.0, 3.0], "b": ["x", "y", "y", "y"]}
).reset_index(drop=True)
missing_output_default = missing_impute(df_miss)
assert pd.DataFrame.equals(missing_output_default, expected_output_default)
expected_output_two_drop = pd.DataFrame(
data={"a": [1.0, 3.0], "b": ["x", "y"]}
).reset_index(drop=True)
missing_output_two_drop = missing_impute(
df_miss, method_num="drop", method_non_num="drop"
)
assert pd.DataFrame.equals(
missing_output_two_drop, expected_output_two_drop
)
expected_output_one_drop = pd.DataFrame(
data={"a": [1.0, 2.0, 3.0], "b": ["x", "y", "y"]}
).reset_index(drop=True)
missing_output_one_drop = missing_impute(df_miss, method_non_num="drop")
assert pd.DataFrame.equals(
expected_output_one_drop, missing_output_one_drop
)
expected_output_median = pd.DataFrame(
data={"a": [1.0, 2.0, 2.0, 3.0], "b": ["x", "y", "y", "y"]}
).reset_index(drop=True)
missing_output_median = missing_impute(df_miss, method_num="median")
assert pd.DataFrame.equals(missing_output_median, expected_output_median)
expected_output_median_drop = pd.DataFrame(
data={"a": [1.0, 2.0, 3.0], "b": ["x", "y", "y"]}
).reset_index(drop=True)
missing_output_median_drop = missing_impute(
df_miss, method_num="median", method_non_num="drop"
)
assert pd.DataFrame.equals(
missing_output_median_drop, expected_output_median_drop
)
expected_output_drop_freq = pd.DataFrame(
[[1.0, "x"], [2.0, "y"], [3.0, "y"], [4.0, "y"]], columns=["a", "b"],
).reset_index(drop=True)
missing_output_drop_freq = missing_impute(
df_miss_2, method_num="drop", method_non_num="most_frequent"
)
assert pd.DataFrame.equals(
missing_output_drop_freq, expected_output_drop_freq
)
with raises(TypeError):
missing_impute(5)
with raises(ValueError):
missing_impute(df_miss, method_num="mea")
with raises(ValueError):
missing_impute(df_miss, method_num="mean", method_non_num="most_freq")
| true | true |
f71ebdc00dcdc70fff18cf0ff576555cd05da13f | 2,262 | py | Python | src/scETM/models/BatchClassifier.py | hui2000ji/scETM | 0a34c345d70b262ebc38e033bae683fa4929ed3e | [
"BSD-3-Clause"
] | 24 | 2021-07-09T12:59:31.000Z | 2022-03-04T22:31:41.000Z | src/scETM/models/BatchClassifier.py | hui2000ji/scETM | 0a34c345d70b262ebc38e033bae683fa4929ed3e | [
"BSD-3-Clause"
] | 3 | 2021-09-07T11:14:19.000Z | 2022-02-15T01:38:09.000Z | src/scETM/models/BatchClassifier.py | hui2000ji/scETM | 0a34c345d70b262ebc38e033bae683fa4929ed3e | [
"BSD-3-Clause"
] | 3 | 2021-12-02T23:44:37.000Z | 2022-02-11T16:46:45.000Z | from typing import Sequence, Mapping
from numpy import mod
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from .model_utils import get_fully_connected_layers
from scETM.logging_utils import log_arguments
class BatchClassifier(nn.Module):
    """Adversarial batch discriminator.

    A fully-connected network that predicts the batch label of an input
    representation.  Its `model_loss` output is intended to be added to
    the upstream model's loss so the learned representation becomes
    uninformative about the batch.
    """

    @log_arguments
    def __init__(self,
        n_input: int,
        n_output: int,
        hidden_sizes: Sequence[int],
        bn: bool = False,
        bn_track_running_stats: bool = False,
        dropout_prob = 0.2,
        adversarial_loss = 'confuse',
        device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    ) -> None:
        """Build the classifier MLP.

        Args:
            n_input: dimensionality of the input representation.
            n_output: number of batches (classes) to discriminate.
            hidden_sizes: sizes of the hidden fully-connected layers.
            bn: whether to add batch normalization layers.
            bn_track_running_stats: track_running_stats flag forwarded to
                the batch-norm layers.
            dropout_prob: dropout probability between layers.
            adversarial_loss: 'confuse' drives the upstream model toward a
                uniform batch posterior; 'reverse' negates the
                classification loss instead.
            device: device on which the classifier is placed.
        """
        super().__init__()
        self.batch_clf = get_fully_connected_layers(
            n_trainable_input=n_input,
            n_trainable_output=n_output,
            hidden_sizes=hidden_sizes,
            bn=bn,
            bn_track_running_stats=bn_track_running_stats,
            dropout_prob=dropout_prob,
        ).to(device)
        self.n_output = n_output
        assert adversarial_loss in ('confuse', 'reverse')
        self.adversarial_loss = adversarial_loss

    def forward(self, X: torch.Tensor, y: torch.Tensor) -> Mapping[str, torch.Tensor]:
        """Classify X and compute classifier/adversarial losses.

        In eval mode returns only dict(logit=...).  In training mode
        returns a 3-tuple (clf_loss, dict(logit, model_loss),
        dict(clf_loss)) — NOTE(review): this differs from the annotated
        Mapping return type.
        """
        logit = self.batch_clf(X)
        if not self.training:
            return dict(logit=logit)
        clf_loss = F.cross_entropy(logit, y)
        if self.adversarial_loss == 'confuse':
            # Cross-entropy between the predicted batch posterior and the
            # uniform distribution over the n_output batches: minimized
            # when the classifier cannot tell batches apart.
            model_loss = (-F.log_softmax(logit, dim=-1) * torch.zeros_like(logit).fill_(1/self.n_output)).sum(-1).mean()
        else:
            # 'reverse': the upstream model maximizes the classifier loss.
            model_loss = -clf_loss
        return clf_loss, dict(logit=logit, model_loss=model_loss), dict(clf_loss=clf_loss.detach().item())

    def train_step(self,
        optimizer: optim.Optimizer,
        X: torch.Tensor,
        y: torch.Tensor
    ) -> Mapping[str, torch.Tensor]:
        """Run one optimization step of the discriminator.

        Performs a training-mode forward pass, backpropagates the
        classification loss, steps *optimizer*, and returns a record dict
        containing 'clf_loss' and 'clf_acc' (batch accuracy) as floats.
        """
        self.train()
        optimizer.zero_grad()
        loss, fwd_dict, new_records = self(X, y)
        loss.backward()
        optimizer.step()
        new_records['clf_acc'] = (fwd_dict['logit'].argmax(1) == y).to(torch.float).mean().detach().item()
        return new_records
from numpy import mod
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from .model_utils import get_fully_connected_layers
from scETM.logging_utils import log_arguments
class BatchClassifier(nn.Module):
@log_arguments
def __init__(self,
n_input: int,
n_output: int,
hidden_sizes: Sequence[int],
bn: bool = False,
bn_track_running_stats: bool = False,
dropout_prob = 0.2,
adversarial_loss = 'confuse',
device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
) -> None:
super().__init__()
self.batch_clf = get_fully_connected_layers(
n_trainable_input=n_input,
n_trainable_output=n_output,
hidden_sizes=hidden_sizes,
bn=bn,
bn_track_running_stats=bn_track_running_stats,
dropout_prob=dropout_prob,
).to(device)
self.n_output = n_output
assert adversarial_loss in ('confuse', 'reverse')
self.adversarial_loss = adversarial_loss
def forward(self, X: torch.Tensor, y: torch.Tensor) -> Mapping[str, torch.Tensor]:
logit = self.batch_clf(X)
if not self.training:
return dict(logit=logit)
clf_loss = F.cross_entropy(logit, y)
if self.adversarial_loss == 'confuse':
model_loss = (-F.log_softmax(logit, dim=-1) * torch.zeros_like(logit).fill_(1/self.n_output)).sum(-1).mean()
else:
model_loss = -clf_loss
return clf_loss, dict(logit=logit, model_loss=model_loss), dict(clf_loss=clf_loss.detach().item())
def train_step(self,
optimizer: optim.Optimizer,
X: torch.Tensor,
y: torch.Tensor
) -> Mapping[str, torch.Tensor]:
self.train()
optimizer.zero_grad()
loss, fwd_dict, new_records = self(X, y)
loss.backward()
optimizer.step()
new_records['clf_acc'] = (fwd_dict['logit'].argmax(1) == y).to(torch.float).mean().detach().item()
return new_records | true | true |
f71ebec53b1716481e14b033e6c02c932b2b09ae | 569 | py | Python | app/migrations/0005_attacker.py | ArighnaIITG/Resurrector | 319717fc7382af953d9bada084d00f0f0dd635e2 | [
"MIT"
] | null | null | null | app/migrations/0005_attacker.py | ArighnaIITG/Resurrector | 319717fc7382af953d9bada084d00f0f0dd635e2 | [
"MIT"
] | null | null | null | app/migrations/0005_attacker.py | ArighnaIITG/Resurrector | 319717fc7382af953d9bada084d00f0f0dd635e2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-29 10:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: creates the Attacker model (id + ip)."""

    dependencies = [
        ('app', '0004_victim_gender'),
    ]

    operations = [
        migrations.CreateModel(
            name='Attacker',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Stored as plain text, max 50 chars; no format validation.
                ('ip', models.CharField(max_length=50)),
            ],
        ),
    ]
| 24.73913 | 114 | 0.588752 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0004_victim_gender'),
]
operations = [
migrations.CreateModel(
name='Attacker',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip', models.CharField(max_length=50)),
],
),
]
| true | true |
f71ebf6e2a819992e2481ad01062fab1764d0791 | 1,758 | py | Python | models/TVAE.py | traindb-project/traindb-model | 9ffdb8c0195051630692dbe6dfd8b9fe816a619f | [
"Apache-2.0"
] | 4 | 2021-12-08T05:38:18.000Z | 2022-02-23T01:33:49.000Z | models/TVAE.py | traindb-project/traindb-model | 9ffdb8c0195051630692dbe6dfd8b9fe816a619f | [
"Apache-2.0"
] | null | null | null | models/TVAE.py | traindb-project/traindb-model | 9ffdb8c0195051630692dbe6dfd8b9fe816a619f | [
"Apache-2.0"
] | null | null | null | """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import rdt
import sdv
from TrainDBBaseModel import TrainDBSynopsisModel
import pandas as pd
import torch
LOGGER = logging.getLogger(__name__)
class TVAE(TrainDBSynopsisModel):
    """TrainDB synopsis model backed by the SDV TVAE tabular synthesizer."""

    def train(self, real_data, table_metadata):
        """Fit an SDV TVAE model on *real_data*."""
        # Remember the column order so synopses can be returned with the
        # same schema as the training data.
        self.columns, _ = self.get_columns(real_data, table_metadata)
        LOGGER.info("Training %s", self.__class__.__name__)
        model_kwargs = {}
        self.model = sdv.tabular.TVAE(table_metadata=table_metadata, **model_kwargs)
        self.model.fit(real_data)

    def save(self, output_path):
        """Persist the fitted model and its column order under *output_path*."""
        self.model.save(output_path + '/model.pkl')
        torch.save({
            'columns': self.columns
        }, output_path + '/model_info.pth')

    def load(self, input_path):
        """Restore a model previously written by save()."""
        self.model = sdv.tabular.TVAE.load(input_path + '/model.pkl')
        saved_model_info = torch.load(input_path + '/model_info.pth')
        self.columns = saved_model_info['columns']

    def synopsis(self, row_count):
        """Sample *row_count* synthetic rows as a DataFrame with the
        original column order."""
        LOGGER.info("Synopsis Generating %s", self.__class__.__name__)
        synthetic_data = self.model.sample(row_count)
        synthetic_data = pd.DataFrame(synthetic_data, columns=self.columns)
        return synthetic_data
| 33.807692 | 84 | 0.706485 |
import logging
import rdt
import sdv
from TrainDBBaseModel import TrainDBSynopsisModel
import pandas as pd
import torch
LOGGER = logging.getLogger(__name__)
class TVAE(TrainDBSynopsisModel):
def train(self, real_data, table_metadata):
self.columns, _ = self.get_columns(real_data, table_metadata)
LOGGER.info("Training %s", self.__class__.__name__)
model_kwargs = {}
self.model = sdv.tabular.TVAE(table_metadata=table_metadata, **model_kwargs)
self.model.fit(real_data)
def save(self, output_path):
self.model.save(output_path + '/model.pkl')
torch.save({
'columns': self.columns
}, output_path + '/model_info.pth')
def load(self, input_path):
self.model = sdv.tabular.TVAE.load(input_path + '/model.pkl')
saved_model_info = torch.load(input_path + '/model_info.pth')
self.columns = saved_model_info['columns']
def synopsis(self, row_count):
LOGGER.info("Synopsis Generating %s", self.__class__.__name__)
synthetic_data = self.model.sample(row_count)
synthetic_data = pd.DataFrame(synthetic_data, columns=self.columns)
return synthetic_data
| true | true |
f71ec124894f9eb3d2d2ba49831d457c33672b13 | 152,850 | py | Python | yandex/cloud/mdb/mysql/v1/cluster_service_pb2.py | ovandriyanov/python-sdk | eec7dc65ef23789388fa46d13087d4a03cdc6e57 | [
"MIT"
] | null | null | null | yandex/cloud/mdb/mysql/v1/cluster_service_pb2.py | ovandriyanov/python-sdk | eec7dc65ef23789388fa46d13087d4a03cdc6e57 | [
"MIT"
] | null | null | null | yandex/cloud/mdb/mysql/v1/cluster_service_pb2.py | ovandriyanov/python-sdk | eec7dc65ef23789388fa46d13087d4a03cdc6e57 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: yandex/cloud/mdb/mysql/v1/cluster_service.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from google.type import timeofday_pb2 as google_dot_type_dot_timeofday__pb2
from yandex.cloud.api import operation_pb2 as yandex_dot_cloud_dot_api_dot_operation__pb2
from yandex.cloud.operation import operation_pb2 as yandex_dot_cloud_dot_operation_dot_operation__pb2
from yandex.cloud import validation_pb2 as yandex_dot_cloud_dot_validation__pb2
from yandex.cloud.mdb.mysql.v1 import backup_pb2 as yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_backup__pb2
from yandex.cloud.mdb.mysql.v1 import cluster_pb2 as yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__pb2
from yandex.cloud.mdb.mysql.v1 import database_pb2 as yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_database__pb2
from yandex.cloud.mdb.mysql.v1 import user_pb2 as yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_user__pb2
from yandex.cloud.mdb.mysql.v1.config import mysql5_7_pb2 as yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_config_dot_mysql5__7__pb2
from yandex.cloud.mdb.mysql.v1.config import mysql8_0_pb2 as yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_config_dot_mysql8__0__pb2
from yandex.cloud.mdb.mysql.v1 import maintenance_pb2 as yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_maintenance__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='yandex/cloud/mdb/mysql/v1/cluster_service.proto',
package='yandex.cloud.mdb.mysql.v1',
syntax='proto3',
serialized_options=b'\n\035yandex.cloud.api.mdb.mysql.v1ZCgithub.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mysql/v1;mysql',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n/yandex/cloud/mdb/mysql/v1/cluster_service.proto\x12\x19yandex.cloud.mdb.mysql.v1\x1a\x1cgoogle/api/annotations.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1bgoogle/type/timeofday.proto\x1a yandex/cloud/api/operation.proto\x1a&yandex/cloud/operation/operation.proto\x1a\x1dyandex/cloud/validation.proto\x1a&yandex/cloud/mdb/mysql/v1/backup.proto\x1a\'yandex/cloud/mdb/mysql/v1/cluster.proto\x1a(yandex/cloud/mdb/mysql/v1/database.proto\x1a$yandex/cloud/mdb/mysql/v1/user.proto\x1a/yandex/cloud/mdb/mysql/v1/config/mysql5_7.proto\x1a/yandex/cloud/mdb/mysql/v1/config/mysql8_0.proto\x1a+yandex/cloud/mdb/mysql/v1/maintenance.proto\"5\n\x11GetClusterRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\"\x90\x01\n\x13ListClustersRequest\x12\x1f\n\tfolder_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12\x1d\n\tpage_size\x18\x02 \x01(\x03\x42\n\xfa\xc7\x31\x06\x30-1000\x12\x1d\n\npage_token\x18\x03 \x01(\tB\t\x8a\xc8\x31\x05<=100\x12\x1a\n\x06\x66ilter\x18\x04 \x01(\tB\n\x8a\xc8\x31\x06<=1000\"e\n\x14ListClustersResponse\x12\x34\n\x08\x63lusters\x18\x01 \x03(\x0b\x32\".yandex.cloud.mdb.mysql.v1.Cluster\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"\xce\x05\n\x14\x43reateClusterRequest\x12\x1f\n\tfolder_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12,\n\x04name\x18\x02 \x01(\tB\x1e\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=63\xf2\xc7\x31\x0e[a-zA-Z0-9_-]*\x12\x1e\n\x0b\x64\x65scription\x18\x03 \x01(\tB\t\x8a\xc8\x31\x05<=256\x12\x8c\x01\n\x06labels\x18\x04 \x03(\x0b\x32;.yandex.cloud.mdb.mysql.v1.CreateClusterRequest.LabelsEntryB?\x82\xc8\x31\x04<=64\x8a\xc8\x31\x04<=63\xf2\xc7\x31\x0b[-_0-9a-z]*\xb2\xc8\x31\x06\x1a\x04\x31-63\xb2\xc8\x31\x12\x12\x10[a-z][-_0-9a-z]*\x12\x43\n\x0b\x65nvironment\x18\x05 \x01(\x0e\x32..yandex.cloud.mdb.mysql.v1.Cluster.Environment\x12:\n\x0b\x63onfig_spec\x18\x06 
\x01(\x0b\x32%.yandex.cloud.mdb.mysql.v1.ConfigSpec\x12?\n\x0e\x64\x61tabase_specs\x18\x07 \x03(\x0b\x32\'.yandex.cloud.mdb.mysql.v1.DatabaseSpec\x12\x37\n\nuser_specs\x18\x08 \x03(\x0b\x32#.yandex.cloud.mdb.mysql.v1.UserSpec\x12\x37\n\nhost_specs\x18\t \x03(\x0b\x32#.yandex.cloud.mdb.mysql.v1.HostSpec\x12\x1c\n\nnetwork_id\x18\n \x01(\tB\x08\x8a\xc8\x31\x04<=50\x12\x1a\n\x12security_group_ids\x18\x0b \x03(\t\x12\x1b\n\x13\x64\x65letion_protection\x18\x0c \x01(\x08\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"+\n\x15\x43reateClusterMetadata\x12\x12\n\ncluster_id\x18\x01 \x01(\t\"\xb0\x04\n\x14UpdateClusterRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x1e\n\x0b\x64\x65scription\x18\x03 \x01(\tB\t\x8a\xc8\x31\x05<=256\x12\x8c\x01\n\x06labels\x18\x04 \x03(\x0b\x32;.yandex.cloud.mdb.mysql.v1.UpdateClusterRequest.LabelsEntryB?\x82\xc8\x31\x04<=64\x8a\xc8\x31\x04<=63\xf2\xc7\x31\x0b[-_0-9a-z]*\xb2\xc8\x31\x06\x1a\x04\x31-63\xb2\xc8\x31\x12\x12\x10[a-z][-_0-9a-z]*\x12:\n\x0b\x63onfig_spec\x18\x05 \x01(\x0b\x32%.yandex.cloud.mdb.mysql.v1.ConfigSpec\x12(\n\x04name\x18\x06 \x01(\tB\x1a\x8a\xc8\x31\x04<=63\xf2\xc7\x31\x0e[a-zA-Z0-9_-]*\x12H\n\x12maintenance_window\x18\x07 \x01(\x0b\x32,.yandex.cloud.mdb.mysql.v1.MaintenanceWindow\x12\x1a\n\x12security_group_ids\x18\x08 \x03(\t\x12\x1b\n\x13\x64\x65letion_protection\x18\t \x01(\x08\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"+\n\x15UpdateClusterMetadata\x12\x12\n\ncluster_id\x18\x01 \x01(\t\"8\n\x14\x44\x65leteClusterRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\"+\n\x15\x44\x65leteClusterMetadata\x12\x12\n\ncluster_id\x18\x01 \x01(\t\"8\n\x14\x42\x61\x63kupClusterRequest\x12 \n\ncluster_id\x18\x01 
\x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\"+\n\x15\x42\x61\x63kupClusterMetadata\x12\x12\n\ncluster_id\x18\x01 \x01(\t\"\xf6\x04\n\x15RestoreClusterRequest\x12\x17\n\tbackup_id\x18\x01 \x01(\tB\x04\xe8\xc7\x31\x01\x12.\n\x04time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x04\xe8\xc7\x31\x01\x12$\n\x04name\x18\x04 \x01(\tB\x16\xe8\xc7\x31\x01\xf2\xc7\x31\x0e[a-zA-Z0-9_-]*\x12\x1e\n\x0b\x64\x65scription\x18\x05 \x01(\tB\t\x8a\xc8\x31\x05<=256\x12\x8d\x01\n\x06labels\x18\x06 \x03(\x0b\x32<.yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.LabelsEntryB?\x82\xc8\x31\x04<=64\x8a\xc8\x31\x04<=63\xf2\xc7\x31\x0b[-_0-9a-z]*\xb2\xc8\x31\x06\x1a\x04\x31-63\xb2\xc8\x31\x12\x12\x10[a-z][-_0-9a-z]*\x12\x43\n\x0b\x65nvironment\x18\x07 \x01(\x0e\x32..yandex.cloud.mdb.mysql.v1.Cluster.Environment\x12:\n\x0b\x63onfig_spec\x18\x08 \x01(\x0b\x32%.yandex.cloud.mdb.mysql.v1.ConfigSpec\x12\x37\n\nhost_specs\x18\t \x03(\x0b\x32#.yandex.cloud.mdb.mysql.v1.HostSpec\x12\x1c\n\nnetwork_id\x18\n \x01(\tB\x08\x8a\xc8\x31\x04<=50\x12\x1b\n\tfolder_id\x18\x0b \x01(\tB\x08\x8a\xc8\x31\x04<=50\x12\x1a\n\x12security_group_ids\x18\x0c \x03(\t\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"?\n\x16RestoreClusterMetadata\x12\x12\n\ncluster_id\x18\x01 \x01(\t\x12\x11\n\tbackup_id\x18\x02 \x01(\t\"]\n\x1bStartClusterFailoverRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12\x1c\n\thost_name\x18\x02 \x01(\tB\t\x8a\xc8\x31\x05<=253\"2\n\x1cStartClusterFailoverMetadata\x12\x12\n\ncluster_id\x18\x01 \x01(\t\"\xca\x02\n\x1cRescheduleMaintenanceRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12\x65\n\x0freschedule_type\x18\x02 \x01(\x0e\x32\x46.yandex.cloud.mdb.mysql.v1.RescheduleMaintenanceRequest.RescheduleTypeB\x04\xe8\xc7\x31\x01\x12\x31\n\rdelayed_until\x18\x03 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\"n\n\x0eRescheduleType\x12\x1f\n\x1bRESCHEDULE_TYPE_UNSPECIFIED\x10\x00\x12\r\n\tIMMEDIATE\x10\x01\x12\x19\n\x15NEXT_AVAILABLE_WINDOW\x10\x02\x12\x11\n\rSPECIFIC_TIME\x10\x03\"f\n\x1dRescheduleMaintenanceMetadata\x12\x12\n\ncluster_id\x18\x01 \x01(\t\x12\x31\n\rdelayed_until\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xae\x01\n\tLogRecord\x12-\n\ttimestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x42\n\x07message\x18\x02 \x03(\x0b\x32\x31.yandex.cloud.mdb.mysql.v1.LogRecord.MessageEntry\x1a.\n\x0cMessageEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xd8\x03\n\x16ListClusterLogsRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12\x15\n\rcolumn_filter\x18\x02 \x03(\t\x12S\n\x0cservice_type\x18\x03 \x01(\x0e\x32=.yandex.cloud.mdb.mysql.v1.ListClusterLogsRequest.ServiceType\x12-\n\tfrom_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x07to_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1d\n\tpage_size\x18\x06 \x01(\x03\x42\n\xfa\xc7\x31\x06\x30-1000\x12\x1d\n\npage_token\x18\x07 \x01(\tB\t\x8a\xc8\x31\x05<=100\x12\x1e\n\x16\x61lways_next_page_token\x18\x08 \x01(\x08\"v\n\x0bServiceType\x12\x1c\n\x18SERVICE_TYPE_UNSPECIFIED\x10\x00\x12\x0f\n\x0bMYSQL_ERROR\x10\x01\x12\x11\n\rMYSQL_GENERAL\x10\x02\x12\x14\n\x10MYSQL_SLOW_QUERY\x10\x03\x12\x0f\n\x0bMYSQL_AUDIT\x10\x04\"f\n\x17ListClusterLogsResponse\x12\x32\n\x04logs\x18\x01 \x03(\x0b\x32$.yandex.cloud.mdb.mysql.v1.LogRecord\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"b\n\x0fStreamLogRecord\x12\x34\n\x06record\x18\x01 \x01(\x0b\x32$.yandex.cloud.mdb.mysql.v1.LogRecord\x12\x19\n\x11next_record_token\x18\x02 \x01(\t\"\xbb\x03\n\x18StreamClusterLogsRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12\x15\n\rcolumn_filter\x18\x02 \x03(\t\x12U\n\x0cservice_type\x18\x03 
\x01(\x0e\x32?.yandex.cloud.mdb.mysql.v1.StreamClusterLogsRequest.ServiceType\x12-\n\tfrom_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x07to_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1f\n\x0crecord_token\x18\x06 \x01(\tB\t\x8a\xc8\x31\x05<=100\x12\x1a\n\x06\x66ilter\x18\x07 \x01(\tB\n\x8a\xc8\x31\x06<=1000\"v\n\x0bServiceType\x12\x1c\n\x18SERVICE_TYPE_UNSPECIFIED\x10\x00\x12\x0f\n\x0bMYSQL_ERROR\x10\x01\x12\x11\n\rMYSQL_GENERAL\x10\x02\x12\x14\n\x10MYSQL_SLOW_QUERY\x10\x03\x12\x0f\n\x0bMYSQL_AUDIT\x10\x04\"~\n\x1cListClusterOperationsRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12\x1d\n\tpage_size\x18\x02 \x01(\x03\x42\n\xfa\xc7\x31\x06\x30-1000\x12\x1d\n\npage_token\x18\x03 \x01(\tB\t\x8a\xc8\x31\x05<=100\"o\n\x1dListClusterOperationsResponse\x12\x35\n\noperations\x18\x01 \x03(\x0b\x32!.yandex.cloud.operation.Operation\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"{\n\x19ListClusterBackupsRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12\x1d\n\tpage_size\x18\x02 \x01(\x03\x42\n\xfa\xc7\x31\x06<=1000\x12\x1d\n\npage_token\x18\x03 \x01(\tB\t\x8a\xc8\x31\x05<=100\"i\n\x1aListClusterBackupsResponse\x12\x32\n\x07\x62\x61\x63kups\x18\x01 \x03(\x0b\x32!.yandex.cloud.mdb.mysql.v1.Backup\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"y\n\x17ListClusterHostsRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12\x1d\n\tpage_size\x18\x02 \x01(\x03\x42\n\xfa\xc7\x31\x06\x30-1000\x12\x1d\n\npage_token\x18\x03 \x01(\tB\t\x8a\xc8\x31\x05<=100\"c\n\x18ListClusterHostsResponse\x12.\n\x05hosts\x18\x01 \x03(\x0b\x32\x1f.yandex.cloud.mdb.mysql.v1.Host\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"{\n\x16\x41\x64\x64\x43lusterHostsRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12?\n\nhost_specs\x18\x02 
\x03(\x0b\x32#.yandex.cloud.mdb.mysql.v1.HostSpecB\x06\x82\xc8\x31\x02>0\"A\n\x17\x41\x64\x64\x43lusterHostsMetadata\x12\x12\n\ncluster_id\x18\x01 \x01(\t\x12\x12\n\nhost_names\x18\x02 \x03(\t\"b\n\x19\x44\x65leteClusterHostsRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12#\n\nhost_names\x18\x02 \x03(\tB\x0f\x82\xc8\x31\x02>0\x8a\xc8\x31\x05<=253\"D\n\x1a\x44\x65leteClusterHostsMetadata\x12\x12\n\ncluster_id\x18\x01 \x01(\t\x12\x12\n\nhost_names\x18\x02 \x03(\t\"7\n\x13StartClusterRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\"*\n\x14StartClusterMetadata\x12\x12\n\ncluster_id\x18\x01 \x01(\t\"6\n\x12StopClusterRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\")\n\x13StopClusterMetadata\x12\x12\n\ncluster_id\x18\x01 \x01(\t\"c\n\x12MoveClusterRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12+\n\x15\x64\x65stination_folder_id\x18\x02 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\"b\n\x13MoveClusterMetadata\x12\x12\n\ncluster_id\x18\x01 \x01(\t\x12\x18\n\x10source_folder_id\x18\x02 \x01(\t\x12\x1d\n\x15\x64\x65stination_folder_id\x18\x03 \x01(\t\"\x8b\x01\n\x19UpdateClusterHostsRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12L\n\x11update_host_specs\x18\x02 \x03(\x0b\x32).yandex.cloud.mdb.mysql.v1.UpdateHostSpecB\x06\x82\xc8\x31\x02>0\"D\n\x1aUpdateClusterHostsMetadata\x12\x12\n\ncluster_id\x18\x01 \x01(\t\x12\x12\n\nhost_names\x18\x02 \x03(\t\"\xd1\x01\n\x0eUpdateHostSpec\x12\x17\n\thost_name\x18\x01 \x01(\tB\x04\xe8\xc7\x31\x01\x12\x1a\n\x12replication_source\x18\x02 \x01(\t\x12/\n\x0bupdate_mask\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\"\n\x0f\x62\x61\x63kup_priority\x18\x04 \x01(\x03\x42\t\xfa\xc7\x31\x05\x30-100\x12\x18\n\x10\x61ssign_public_ip\x18\x05 \x01(\x08\x12\x1b\n\x08priority\x18\x06 
\x01(\x03\x42\t\xfa\xc7\x31\x05\x30-100\"\xb9\x01\n\x08HostSpec\x12\x19\n\x07zone_id\x18\x01 \x01(\tB\x08\x8a\xc8\x31\x04<=50\x12\x1b\n\tsubnet_id\x18\x02 \x01(\tB\x08\x8a\xc8\x31\x04<=50\x12\x18\n\x10\x61ssign_public_ip\x18\x03 \x01(\x08\x12\x1a\n\x12replication_source\x18\x04 \x01(\t\x12\"\n\x0f\x62\x61\x63kup_priority\x18\x05 \x01(\x03\x42\t\xfa\xc7\x31\x05\x30-100\x12\x1b\n\x08priority\x18\x06 \x01(\x03\x42\t\xfa\xc7\x31\x05\x30-100\"\xe0\x03\n\nConfigSpec\x12\x0f\n\x07version\x18\x01 \x01(\t\x12]\n\x10mysql_config_5_7\x18\x02 \x01(\x0b\x32\x30.yandex.cloud.mdb.mysql.v1.config.MysqlConfig5_7H\x00R\x0fmysqlConfig_5_7\x12]\n\x10mysql_config_8_0\x18\x06 \x01(\x0b\x32\x30.yandex.cloud.mdb.mysql.v1.config.MysqlConfig8_0H\x00R\x0fmysqlConfig_8_0\x12\x37\n\tresources\x18\x03 \x01(\x0b\x32$.yandex.cloud.mdb.mysql.v1.Resources\x12\x33\n\x13\x62\x61\x63kup_window_start\x18\x04 \x01(\x0b\x32\x16.google.type.TimeOfDay\x12\x31\n\x06\x61\x63\x63\x65ss\x18\x05 \x01(\x0b\x32!.yandex.cloud.mdb.mysql.v1.Access\x12R\n\x17performance_diagnostics\x18\x07 \x01(\x0b\x32\x31.yandex.cloud.mdb.mysql.v1.PerformanceDiagnosticsB\x0e\n\x0cmysql_config2\xca\x1d\n\x0e\x43lusterService\x12\x88\x01\n\x03Get\x12,.yandex.cloud.mdb.mysql.v1.GetClusterRequest\x1a\".yandex.cloud.mdb.mysql.v1.Cluster\"/\x82\xd3\xe4\x93\x02)\x12\'/managed-mysql/v1/clusters/{cluster_id}\x12\x8b\x01\n\x04List\x12..yandex.cloud.mdb.mysql.v1.ListClustersRequest\x1a/.yandex.cloud.mdb.mysql.v1.ListClustersResponse\"\"\x82\xd3\xe4\x93\x02\x1c\x12\x1a/managed-mysql/v1/clusters\x12\xa7\x01\n\x06\x43reate\x12/.yandex.cloud.mdb.mysql.v1.CreateClusterRequest\x1a!.yandex.cloud.operation.Operation\"I\x82\xd3\xe4\x93\x02\x1f\"\x1a/managed-mysql/v1/clusters:\x01*\xb2\xd2* \n\x15\x43reateClusterMetadata\x12\x07\x43luster\x12\xb4\x01\n\x06Update\x12/.yandex.cloud.mdb.mysql.v1.UpdateClusterRequest\x1a!.yandex.cloud.operation.Operation\"V\x82\xd3\xe4\x93\x02,2\'/managed-mysql/v1/clusters/{cluster_id}:\x01*\xb2\xd2* 
\n\x15UpdateClusterMetadata\x12\x07\x43luster\x12\xbf\x01\n\x06\x44\x65lete\x12/.yandex.cloud.mdb.mysql.v1.DeleteClusterRequest\x1a!.yandex.cloud.operation.Operation\"a\x82\xd3\xe4\x93\x02)*\'/managed-mysql/v1/clusters/{cluster_id}\xb2\xd2*.\n\x15\x44\x65leteClusterMetadata\x12\x15google.protobuf.Empty\x12\xb4\x01\n\x05Start\x12..yandex.cloud.mdb.mysql.v1.StartClusterRequest\x1a!.yandex.cloud.operation.Operation\"X\x82\xd3\xe4\x93\x02/\"-/managed-mysql/v1/clusters/{cluster_id}:start\xb2\xd2*\x1f\n\x14StartClusterMetadata\x12\x07\x43luster\x12\xb0\x01\n\x04Stop\x12-.yandex.cloud.mdb.mysql.v1.StopClusterRequest\x1a!.yandex.cloud.operation.Operation\"V\x82\xd3\xe4\x93\x02.\",/managed-mysql/v1/clusters/{cluster_id}:stop\xb2\xd2*\x1e\n\x13StopClusterMetadata\x12\x07\x43luster\x12\xb3\x01\n\x04Move\x12-.yandex.cloud.mdb.mysql.v1.MoveClusterRequest\x1a!.yandex.cloud.operation.Operation\"Y\x82\xd3\xe4\x93\x02\x31\",/managed-mysql/v1/clusters/{cluster_id}:move:\x01*\xb2\xd2*\x1e\n\x13MoveClusterMetadata\x12\x07\x43luster\x12\xb8\x01\n\x06\x42\x61\x63kup\x12/.yandex.cloud.mdb.mysql.v1.BackupClusterRequest\x1a!.yandex.cloud.operation.Operation\"Z\x82\xd3\xe4\x93\x02\x30\"./managed-mysql/v1/clusters/{cluster_id}:backup\xb2\xd2* 
\n\x15\x42\x61\x63kupClusterMetadata\x12\x07\x43luster\x12\xb2\x01\n\x07Restore\x12\x30.yandex.cloud.mdb.mysql.v1.RestoreClusterRequest\x1a!.yandex.cloud.operation.Operation\"R\x82\xd3\xe4\x93\x02\'\"\"/managed-mysql/v1/clusters:restore:\x01*\xb2\xd2*!\n\x16RestoreClusterMetadata\x12\x07\x43luster\x12\xe9\x01\n\x15RescheduleMaintenance\x12\x37.yandex.cloud.mdb.mysql.v1.RescheduleMaintenanceRequest\x1a!.yandex.cloud.operation.Operation\"t\x82\xd3\xe4\x93\x02\x42\"=/managed-mysql/v1/clusters/{cluster_id}:rescheduleMaintenance:\x01*\xb2\xd2*(\n\x1dRescheduleMaintenanceMetadata\x12\x07\x43luster\x12\xd7\x01\n\rStartFailover\x12\x36.yandex.cloud.mdb.mysql.v1.StartClusterFailoverRequest\x1a!.yandex.cloud.operation.Operation\"k\x82\xd3\xe4\x93\x02:\"5/managed-mysql/v1/clusters/{cluster_id}:startFailover:\x01*\xb2\xd2*\'\n\x1cStartClusterFailoverMetadata\x12\x07\x43luster\x12\xa7\x01\n\x08ListLogs\x12\x31.yandex.cloud.mdb.mysql.v1.ListClusterLogsRequest\x1a\x32.yandex.cloud.mdb.mysql.v1.ListClusterLogsResponse\"4\x82\xd3\xe4\x93\x02.\x12,/managed-mysql/v1/clusters/{cluster_id}:logs\x12\xac\x01\n\nStreamLogs\x12\x33.yandex.cloud.mdb.mysql.v1.StreamClusterLogsRequest\x1a*.yandex.cloud.mdb.mysql.v1.StreamLogRecord\";\x82\xd3\xe4\x93\x02\x35\x12\x33/managed-mysql/v1/clusters/{cluster_id}:stream_logs0\x01\x12\xbf\x01\n\x0eListOperations\x12\x37.yandex.cloud.mdb.mysql.v1.ListClusterOperationsRequest\x1a\x38.yandex.cloud.mdb.mysql.v1.ListClusterOperationsResponse\":\x82\xd3\xe4\x93\x02\x34\x12\x32/managed-mysql/v1/clusters/{cluster_id}/operations\x12\xb3\x01\n\x0bListBackups\x12\x34.yandex.cloud.mdb.mysql.v1.ListClusterBackupsRequest\x1a\x35.yandex.cloud.mdb.mysql.v1.ListClusterBackupsResponse\"7\x82\xd3\xe4\x93\x02\x31\x12//managed-mysql/v1/clusters/{cluster_id}/backups\x12\xab\x01\n\tListHosts\x12\x32.yandex.cloud.mdb.mysql.v1.ListClusterHostsRequest\x1a\x33.yandex.cloud.mdb.mysql.v1.ListClusterHostsResponse\"5\x82\xd3\xe4\x93\x02/\x12-/managed-mysql/v1/clusters/{cluster_id}/hos
ts\x12\xda\x01\n\x08\x41\x64\x64Hosts\x12\x31.yandex.cloud.mdb.mysql.v1.AddClusterHostsRequest\x1a!.yandex.cloud.operation.Operation\"x\x82\xd3\xe4\x93\x02>\"9/managed-mysql/v1/clusters/{cluster_id}/hosts:batchCreate:\x01*\xb2\xd2*0\n\x17\x41\x64\x64\x43lusterHostsMetadata\x12\x15google.protobuf.Empty\x12\xe3\x01\n\x0bUpdateHosts\x12\x34.yandex.cloud.mdb.mysql.v1.UpdateClusterHostsRequest\x1a!.yandex.cloud.operation.Operation\"{\x82\xd3\xe4\x93\x02>\"9/managed-mysql/v1/clusters/{cluster_id}/hosts:batchUpdate:\x01*\xb2\xd2*3\n\x1aUpdateClusterHostsMetadata\x12\x15google.protobuf.Empty\x12\xe3\x01\n\x0b\x44\x65leteHosts\x12\x34.yandex.cloud.mdb.mysql.v1.DeleteClusterHostsRequest\x1a!.yandex.cloud.operation.Operation\"{\x82\xd3\xe4\x93\x02>\"9/managed-mysql/v1/clusters/{cluster_id}/hosts:batchDelete:\x01*\xb2\xd2*3\n\x1a\x44\x65leteClusterHostsMetadata\x12\x15google.protobuf.EmptyBd\n\x1dyandex.cloud.api.mdb.mysql.v1ZCgithub.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mysql/v1;mysqlb\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,google_dot_type_dot_timeofday__pb2.DESCRIPTOR,yandex_dot_cloud_dot_api_dot_operation__pb2.DESCRIPTOR,yandex_dot_cloud_dot_operation_dot_operation__pb2.DESCRIPTOR,yandex_dot_cloud_dot_validation__pb2.DESCRIPTOR,yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_backup__pb2.DESCRIPTOR,yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__pb2.DESCRIPTOR,yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_database__pb2.DESCRIPTOR,yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_user__pb2.DESCRIPTOR,yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_config_dot_mysql5__7__pb2.DESCRIPTOR,yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_config_dot_mysql8__0__pb2.DESCRIPTOR,yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_maintenance__pb2.DESCRIPTOR,])
_RESCHEDULEMAINTENANCEREQUEST_RESCHEDULETYPE = _descriptor.EnumDescriptor(
name='RescheduleType',
full_name='yandex.cloud.mdb.mysql.v1.RescheduleMaintenanceRequest.RescheduleType',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='RESCHEDULE_TYPE_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='IMMEDIATE', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='NEXT_AVAILABLE_WINDOW', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='SPECIFIC_TIME', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=3564,
serialized_end=3674,
)
_sym_db.RegisterEnumDescriptor(_RESCHEDULEMAINTENANCEREQUEST_RESCHEDULETYPE)
_LISTCLUSTERLOGSREQUEST_SERVICETYPE = _descriptor.EnumDescriptor(
name='ServiceType',
full_name='yandex.cloud.mdb.mysql.v1.ListClusterLogsRequest.ServiceType',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='SERVICE_TYPE_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MYSQL_ERROR', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MYSQL_GENERAL', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MYSQL_SLOW_QUERY', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MYSQL_AUDIT', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=4312,
serialized_end=4430,
)
_sym_db.RegisterEnumDescriptor(_LISTCLUSTERLOGSREQUEST_SERVICETYPE)
_STREAMCLUSTERLOGSREQUEST_SERVICETYPE = _descriptor.EnumDescriptor(
name='ServiceType',
full_name='yandex.cloud.mdb.mysql.v1.StreamClusterLogsRequest.ServiceType',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='SERVICE_TYPE_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MYSQL_ERROR', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MYSQL_GENERAL', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MYSQL_SLOW_QUERY', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MYSQL_AUDIT', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=4312,
serialized_end=4430,
)
_sym_db.RegisterEnumDescriptor(_STREAMCLUSTERLOGSREQUEST_SERVICETYPE)
_GETCLUSTERREQUEST = _descriptor.Descriptor(
name='GetClusterRequest',
full_name='yandex.cloud.mdb.mysql.v1.GetClusterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.GetClusterRequest.cluster_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=613,
serialized_end=666,
)
_LISTCLUSTERSREQUEST = _descriptor.Descriptor(
name='ListClustersRequest',
full_name='yandex.cloud.mdb.mysql.v1.ListClustersRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='folder_id', full_name='yandex.cloud.mdb.mysql.v1.ListClustersRequest.folder_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='page_size', full_name='yandex.cloud.mdb.mysql.v1.ListClustersRequest.page_size', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\0060-1000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='page_token', full_name='yandex.cloud.mdb.mysql.v1.ListClustersRequest.page_token', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\212\3101\005<=100', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='filter', full_name='yandex.cloud.mdb.mysql.v1.ListClustersRequest.filter', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\212\3101\006<=1000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=669,
serialized_end=813,
)
_LISTCLUSTERSRESPONSE = _descriptor.Descriptor(
name='ListClustersResponse',
full_name='yandex.cloud.mdb.mysql.v1.ListClustersResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='clusters', full_name='yandex.cloud.mdb.mysql.v1.ListClustersResponse.clusters', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='next_page_token', full_name='yandex.cloud.mdb.mysql.v1.ListClustersResponse.next_page_token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=815,
serialized_end=916,
)
_CREATECLUSTERREQUEST_LABELSENTRY = _descriptor.Descriptor(
name='LabelsEntry',
full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.LabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.LabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.LabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1592,
serialized_end=1637,
)
_CREATECLUSTERREQUEST = _descriptor.Descriptor(
name='CreateClusterRequest',
full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='folder_id', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.folder_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001\212\3101\004<=63\362\3071\016[a-zA-Z0-9_-]*', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='description', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.description', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\212\3101\005<=256', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='labels', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.labels', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\202\3101\004<=64\212\3101\004<=63\362\3071\013[-_0-9a-z]*\262\3101\006\032\0041-63\262\3101\022\022\020[a-z][-_0-9a-z]*', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='environment', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.environment', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='config_spec', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.config_spec', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='database_specs', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.database_specs', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='user_specs', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.user_specs', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='host_specs', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.host_specs', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='network_id', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.network_id', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='security_group_ids', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.security_group_ids', index=10,
number=11, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='deletion_protection', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.deletion_protection', index=11,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_CREATECLUSTERREQUEST_LABELSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=919,
serialized_end=1637,
)
# -----------------------------------------------------------------------------
# Machine-generated protobuf message descriptors (protoc Python plugin) for the
# yandex.cloud.mdb.mysql.v1 cluster service.  DO NOT EDIT BY HAND: the
# serialized_start/serialized_end values are byte offsets into this module's
# DESCRIPTOR serialized file proto, so every literal below must stay in sync
# with the generated serialized_pb.
#
# Key to the generated FieldDescriptor kwargs (per descriptor.proto):
#   type=9 /cpp_type=9  -> string      type=11/cpp_type=10 -> message
#   type=14/cpp_type=8  -> enum        type=8 /cpp_type=7  -> bool
#   type=3 /cpp_type=2  -> int64
#   label=1 -> singular (optional)     label=3 -> repeated
# serialized_options on fields carry encoded yandex.cloud validation
# extensions (e.g. b'\350\3071\001' = "required", b'\212\3101\004<=50' =
# "length <= 50") -- presumed from the yandex.cloud.validation extensions;
# confirm against the source .proto.
# -----------------------------------------------------------------------------

# Operation metadata for CreateCluster: identifies the cluster being created.
_CREATECLUSTERMETADATA = _descriptor.Descriptor(
  name='CreateClusterMetadata',
  full_name='yandex.cloud.mdb.mysql.v1.CreateClusterMetadata',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterMetadata.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1639,
  serialized_end=1682,
)

# Synthesized map-entry message for UpdateClusterRequest.labels
# (map<string, string>); serialized_options=b'8\001' encodes map_entry=true.
_UPDATECLUSTERREQUEST_LABELSENTRY = _descriptor.Descriptor(
  name='LabelsEntry',
  full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterRequest.LabelsEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterRequest.LabelsEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='value', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterRequest.LabelsEntry.value', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=b'8\001',
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1592,
  serialized_end=1637,
)

# Request for Cluster.Update: cluster_id plus an update_mask and the fields
# that may be changed (description, labels, config, name, maintenance window,
# security groups, deletion protection).
_UPDATECLUSTERREQUEST = _descriptor.Descriptor(
  name='UpdateClusterRequest',
  full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterRequest.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='update_mask', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterRequest.update_mask', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='description', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterRequest.description', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\212\3101\005<=256', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='labels', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterRequest.labels', index=3,
      number=4, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\202\3101\004<=64\212\3101\004<=63\362\3071\013[-_0-9a-z]*\262\3101\006\032\0041-63\262\3101\022\022\020[a-z][-_0-9a-z]*', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='config_spec', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterRequest.config_spec', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='name', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterRequest.name', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\212\3101\004<=63\362\3071\016[a-zA-Z0-9_-]*', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='maintenance_window', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterRequest.maintenance_window', index=6,
      number=7, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='security_group_ids', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterRequest.security_group_ids', index=7,
      number=8, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='deletion_protection', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterRequest.deletion_protection', index=8,
      number=9, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[_UPDATECLUSTERREQUEST_LABELSENTRY, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1685,
  serialized_end=2245,
)

# Operation metadata for UpdateCluster: identifies the cluster being updated.
_UPDATECLUSTERMETADATA = _descriptor.Descriptor(
  name='UpdateClusterMetadata',
  full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterMetadata',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterMetadata.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2247,
  serialized_end=2290,
)

# Request for Cluster.Delete: required cluster_id (length <= 50).
_DELETECLUSTERREQUEST = _descriptor.Descriptor(
  name='DeleteClusterRequest',
  full_name='yandex.cloud.mdb.mysql.v1.DeleteClusterRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.DeleteClusterRequest.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2292,
  serialized_end=2348,
)

# Operation metadata for DeleteCluster: identifies the cluster being deleted.
_DELETECLUSTERMETADATA = _descriptor.Descriptor(
  name='DeleteClusterMetadata',
  full_name='yandex.cloud.mdb.mysql.v1.DeleteClusterMetadata',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.DeleteClusterMetadata.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2350,
  serialized_end=2393,
)

# Request for Cluster.Backup: required cluster_id (length <= 50).
_BACKUPCLUSTERREQUEST = _descriptor.Descriptor(
  name='BackupClusterRequest',
  full_name='yandex.cloud.mdb.mysql.v1.BackupClusterRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.BackupClusterRequest.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2395,
  serialized_end=2451,
)

# Operation metadata for BackupCluster: identifies the cluster being backed up.
_BACKUPCLUSTERMETADATA = _descriptor.Descriptor(
  name='BackupClusterMetadata',
  full_name='yandex.cloud.mdb.mysql.v1.BackupClusterMetadata',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.BackupClusterMetadata.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2453,
  serialized_end=2496,
)

# Synthesized map-entry message for RestoreClusterRequest.labels
# (map<string, string>); serialized_options=b'8\001' encodes map_entry=true.
# Shares serialized offsets 1592-1637 with the other LabelsEntry descriptors.
_RESTORECLUSTERREQUEST_LABELSENTRY = _descriptor.Descriptor(
  name='LabelsEntry',
  full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.LabelsEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.LabelsEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='value', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.LabelsEntry.value', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=b'8\001',
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1592,
  serialized_end=1637,
)

# Request for Cluster.Restore: restore a new cluster from backup_id at a given
# point in time, with the new cluster's name/config/hosts/network settings.
_RESTORECLUSTERREQUEST = _descriptor.Descriptor(
  name='RestoreClusterRequest',
  full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='backup_id', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.backup_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='time', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.time', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='name', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.name', index=2,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\362\3071\016[a-zA-Z0-9_-]*', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='description', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.description', index=3,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\212\3101\005<=256', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='labels', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.labels', index=4,
      number=6, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\202\3101\004<=64\212\3101\004<=63\362\3071\013[-_0-9a-z]*\262\3101\006\032\0041-63\262\3101\022\022\020[a-z][-_0-9a-z]*', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='environment', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.environment', index=5,
      number=7, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='config_spec', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.config_spec', index=6,
      number=8, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='host_specs', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.host_specs', index=7,
      number=9, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='network_id', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.network_id', index=8,
      number=10, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\212\3101\004<=50', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='folder_id', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.folder_id', index=9,
      number=11, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\212\3101\004<=50', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='security_group_ids', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.security_group_ids', index=10,
      number=12, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[_RESTORECLUSTERREQUEST_LABELSENTRY, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2499,
  serialized_end=3129,
)

# Operation metadata for RestoreCluster: new cluster id and source backup id.
_RESTORECLUSTERMETADATA = _descriptor.Descriptor(
  name='RestoreClusterMetadata',
  full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterMetadata',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterMetadata.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='backup_id', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterMetadata.backup_id', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3131,
  serialized_end=3194,
)

# Request for Cluster.StartFailover: required cluster_id, optional target
# host_name (FQDN length <= 253) to fail over to.
_STARTCLUSTERFAILOVERREQUEST = _descriptor.Descriptor(
  name='StartClusterFailoverRequest',
  full_name='yandex.cloud.mdb.mysql.v1.StartClusterFailoverRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.StartClusterFailoverRequest.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='host_name', full_name='yandex.cloud.mdb.mysql.v1.StartClusterFailoverRequest.host_name', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\212\3101\005<=253', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3196,
  serialized_end=3289,
)

# Operation metadata for StartClusterFailover.
_STARTCLUSTERFAILOVERMETADATA = _descriptor.Descriptor(
  name='StartClusterFailoverMetadata',
  full_name='yandex.cloud.mdb.mysql.v1.StartClusterFailoverMetadata',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.StartClusterFailoverMetadata.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3291,
  serialized_end=3341,
)

# Request for Cluster.RescheduleMaintenance: required cluster_id and
# reschedule_type enum, plus delayed_until timestamp.  The nested
# RescheduleType enum descriptor is defined elsewhere in this module.
_RESCHEDULEMAINTENANCEREQUEST = _descriptor.Descriptor(
  name='RescheduleMaintenanceRequest',
  full_name='yandex.cloud.mdb.mysql.v1.RescheduleMaintenanceRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.RescheduleMaintenanceRequest.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='reschedule_type', full_name='yandex.cloud.mdb.mysql.v1.RescheduleMaintenanceRequest.reschedule_type', index=1,
      number=2, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='delayed_until', full_name='yandex.cloud.mdb.mysql.v1.RescheduleMaintenanceRequest.delayed_until', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _RESCHEDULEMAINTENANCEREQUEST_RESCHEDULETYPE,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3344,
  serialized_end=3674,
)

# Operation metadata for RescheduleMaintenance: cluster id and new
# delayed_until timestamp (note field number 4, not 2).
_RESCHEDULEMAINTENANCEMETADATA = _descriptor.Descriptor(
  name='RescheduleMaintenanceMetadata',
  full_name='yandex.cloud.mdb.mysql.v1.RescheduleMaintenanceMetadata',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.RescheduleMaintenanceMetadata.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='delayed_until', full_name='yandex.cloud.mdb.mysql.v1.RescheduleMaintenanceMetadata.delayed_until', index=1,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3676,
  serialized_end=3778,
)

# Synthesized map-entry message for LogRecord.message
# (map<string, string>); serialized_options=b'8\001' encodes map_entry=true.
_LOGRECORD_MESSAGEENTRY = _descriptor.Descriptor(
  name='MessageEntry',
  full_name='yandex.cloud.mdb.mysql.v1.LogRecord.MessageEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='yandex.cloud.mdb.mysql.v1.LogRecord.MessageEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='value', full_name='yandex.cloud.mdb.mysql.v1.LogRecord.MessageEntry.value', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=b'8\001',
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3909,
  serialized_end=3955,
)

# A single cluster log record: timestamp plus a string->string message map.
_LOGRECORD = _descriptor.Descriptor(
  name='LogRecord',
  full_name='yandex.cloud.mdb.mysql.v1.LogRecord',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='timestamp', full_name='yandex.cloud.mdb.mysql.v1.LogRecord.timestamp', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='message', full_name='yandex.cloud.mdb.mysql.v1.LogRecord.message', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[_LOGRECORD_MESSAGEENTRY, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3781,
  serialized_end=3955,
)

# Request for Cluster.ListLogs: cluster_id plus column filter, service type
# enum, time window, and pagination (page_size 0-1000, page_token <= 100).
# The nested ServiceType enum descriptor is defined elsewhere in this module.
_LISTCLUSTERLOGSREQUEST = _descriptor.Descriptor(
  name='ListClusterLogsRequest',
  full_name='yandex.cloud.mdb.mysql.v1.ListClusterLogsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.ListClusterLogsRequest.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='column_filter', full_name='yandex.cloud.mdb.mysql.v1.ListClusterLogsRequest.column_filter', index=1,
      number=2, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='service_type', full_name='yandex.cloud.mdb.mysql.v1.ListClusterLogsRequest.service_type', index=2,
      number=3, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='from_time', full_name='yandex.cloud.mdb.mysql.v1.ListClusterLogsRequest.from_time', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='to_time', full_name='yandex.cloud.mdb.mysql.v1.ListClusterLogsRequest.to_time', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='page_size', full_name='yandex.cloud.mdb.mysql.v1.ListClusterLogsRequest.page_size', index=5,
      number=6, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\372\3071\0060-1000', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='page_token', full_name='yandex.cloud.mdb.mysql.v1.ListClusterLogsRequest.page_token', index=6,
      number=7, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\212\3101\005<=100', file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='always_next_page_token', full_name='yandex.cloud.mdb.mysql.v1.ListClusterLogsRequest.always_next_page_token', index=7,
      number=8, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _LISTCLUSTERLOGSREQUEST_SERVICETYPE,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3958,
  serialized_end=4430,
)

# Response for Cluster.ListLogs: repeated LogRecord plus pagination token.
_LISTCLUSTERLOGSRESPONSE = _descriptor.Descriptor(
  name='ListClusterLogsResponse',
  full_name='yandex.cloud.mdb.mysql.v1.ListClusterLogsResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='logs', full_name='yandex.cloud.mdb.mysql.v1.ListClusterLogsResponse.logs', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='next_page_token', full_name='yandex.cloud.mdb.mysql.v1.ListClusterLogsResponse.next_page_token', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4432,
  serialized_end=4534,
)
_STREAMLOGRECORD = _descriptor.Descriptor(
name='StreamLogRecord',
full_name='yandex.cloud.mdb.mysql.v1.StreamLogRecord',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='record', full_name='yandex.cloud.mdb.mysql.v1.StreamLogRecord.record', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='next_record_token', full_name='yandex.cloud.mdb.mysql.v1.StreamLogRecord.next_record_token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4536,
serialized_end=4634,
)
# Message descriptor for StreamClusterLogsRequest (fields: cluster_id,
# column_filter, service_type, from_time, to_time, record_token, filter).
# The serialized_options byte strings on fields presumably encode yandex.cloud
# validation annotations (e.g. "required", "length <= 50") -- confirm against
# the source .proto; edit them only via the .proto.
_STREAMCLUSTERLOGSREQUEST = _descriptor.Descriptor(
  name='StreamClusterLogsRequest',
  full_name='yandex.cloud.mdb.mysql.v1.StreamClusterLogsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.StreamClusterLogsRequest.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='column_filter', full_name='yandex.cloud.mdb.mysql.v1.StreamClusterLogsRequest.column_filter', index=1,
      number=2, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='service_type', full_name='yandex.cloud.mdb.mysql.v1.StreamClusterLogsRequest.service_type', index=2,
      number=3, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='from_time', full_name='yandex.cloud.mdb.mysql.v1.StreamClusterLogsRequest.from_time', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='to_time', full_name='yandex.cloud.mdb.mysql.v1.StreamClusterLogsRequest.to_time', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='record_token', full_name='yandex.cloud.mdb.mysql.v1.StreamClusterLogsRequest.record_token', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\212\3101\005<=100', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='filter', full_name='yandex.cloud.mdb.mysql.v1.StreamClusterLogsRequest.filter', index=6,
      number=7, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\212\3101\006<=1000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _STREAMCLUSTERLOGSREQUEST_SERVICETYPE,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4637,
  serialized_end=5080,
)
# Message descriptor for ListClusterOperationsRequest
# (fields: cluster_id, page_size, page_token).
_LISTCLUSTEROPERATIONSREQUEST = _descriptor.Descriptor(
  name='ListClusterOperationsRequest',
  full_name='yandex.cloud.mdb.mysql.v1.ListClusterOperationsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.ListClusterOperationsRequest.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='page_size', full_name='yandex.cloud.mdb.mysql.v1.ListClusterOperationsRequest.page_size', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\372\3071\0060-1000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='page_token', full_name='yandex.cloud.mdb.mysql.v1.ListClusterOperationsRequest.page_token', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\212\3101\005<=100', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=5082,
  serialized_end=5208,
)
# Message descriptor for ListClusterOperationsResponse
# (fields: operations, next_page_token).
_LISTCLUSTEROPERATIONSRESPONSE = _descriptor.Descriptor(
  name='ListClusterOperationsResponse',
  full_name='yandex.cloud.mdb.mysql.v1.ListClusterOperationsResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='operations', full_name='yandex.cloud.mdb.mysql.v1.ListClusterOperationsResponse.operations', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='next_page_token', full_name='yandex.cloud.mdb.mysql.v1.ListClusterOperationsResponse.next_page_token', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=5210,
  serialized_end=5321,
)
# Message descriptor for ListClusterBackupsRequest
# (fields: cluster_id, page_size, page_token).
_LISTCLUSTERBACKUPSREQUEST = _descriptor.Descriptor(
  name='ListClusterBackupsRequest',
  full_name='yandex.cloud.mdb.mysql.v1.ListClusterBackupsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.ListClusterBackupsRequest.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='page_size', full_name='yandex.cloud.mdb.mysql.v1.ListClusterBackupsRequest.page_size', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\372\3071\006<=1000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='page_token', full_name='yandex.cloud.mdb.mysql.v1.ListClusterBackupsRequest.page_token', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\212\3101\005<=100', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=5323,
  serialized_end=5446,
)
# Message descriptor for ListClusterBackupsResponse
# (fields: backups, next_page_token).
_LISTCLUSTERBACKUPSRESPONSE = _descriptor.Descriptor(
  name='ListClusterBackupsResponse',
  full_name='yandex.cloud.mdb.mysql.v1.ListClusterBackupsResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='backups', full_name='yandex.cloud.mdb.mysql.v1.ListClusterBackupsResponse.backups', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='next_page_token', full_name='yandex.cloud.mdb.mysql.v1.ListClusterBackupsResponse.next_page_token', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=5448,
  serialized_end=5553,
)
# Message descriptor for ListClusterHostsRequest
# (fields: cluster_id, page_size, page_token).
_LISTCLUSTERHOSTSREQUEST = _descriptor.Descriptor(
  name='ListClusterHostsRequest',
  full_name='yandex.cloud.mdb.mysql.v1.ListClusterHostsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.ListClusterHostsRequest.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='page_size', full_name='yandex.cloud.mdb.mysql.v1.ListClusterHostsRequest.page_size', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\372\3071\0060-1000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='page_token', full_name='yandex.cloud.mdb.mysql.v1.ListClusterHostsRequest.page_token', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\212\3101\005<=100', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=5555,
  serialized_end=5676,
)
# Message descriptor for ListClusterHostsResponse
# (fields: hosts, next_page_token).
_LISTCLUSTERHOSTSRESPONSE = _descriptor.Descriptor(
  name='ListClusterHostsResponse',
  full_name='yandex.cloud.mdb.mysql.v1.ListClusterHostsResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='hosts', full_name='yandex.cloud.mdb.mysql.v1.ListClusterHostsResponse.hosts', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='next_page_token', full_name='yandex.cloud.mdb.mysql.v1.ListClusterHostsResponse.next_page_token', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=5678,
  serialized_end=5777,
)
# Message descriptor for AddClusterHostsRequest
# (fields: cluster_id, host_specs).
_ADDCLUSTERHOSTSREQUEST = _descriptor.Descriptor(
  name='AddClusterHostsRequest',
  full_name='yandex.cloud.mdb.mysql.v1.AddClusterHostsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.AddClusterHostsRequest.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='host_specs', full_name='yandex.cloud.mdb.mysql.v1.AddClusterHostsRequest.host_specs', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\202\3101\002>0', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=5779,
  serialized_end=5902,
)
# Message descriptor for AddClusterHostsMetadata (operation metadata;
# fields: cluster_id, host_names).
_ADDCLUSTERHOSTSMETADATA = _descriptor.Descriptor(
  name='AddClusterHostsMetadata',
  full_name='yandex.cloud.mdb.mysql.v1.AddClusterHostsMetadata',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.AddClusterHostsMetadata.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='host_names', full_name='yandex.cloud.mdb.mysql.v1.AddClusterHostsMetadata.host_names', index=1,
      number=2, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=5904,
  serialized_end=5969,
)
# Message descriptor for DeleteClusterHostsRequest
# (fields: cluster_id, host_names).
_DELETECLUSTERHOSTSREQUEST = _descriptor.Descriptor(
  name='DeleteClusterHostsRequest',
  full_name='yandex.cloud.mdb.mysql.v1.DeleteClusterHostsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.DeleteClusterHostsRequest.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='host_names', full_name='yandex.cloud.mdb.mysql.v1.DeleteClusterHostsRequest.host_names', index=1,
      number=2, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\202\3101\002>0\212\3101\005<=253', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=5971,
  serialized_end=6069,
)
# Message descriptor for DeleteClusterHostsMetadata (operation metadata;
# fields: cluster_id, host_names).
_DELETECLUSTERHOSTSMETADATA = _descriptor.Descriptor(
  name='DeleteClusterHostsMetadata',
  full_name='yandex.cloud.mdb.mysql.v1.DeleteClusterHostsMetadata',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.DeleteClusterHostsMetadata.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='host_names', full_name='yandex.cloud.mdb.mysql.v1.DeleteClusterHostsMetadata.host_names', index=1,
      number=2, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=6071,
  serialized_end=6139,
)
# Message descriptor for StartClusterRequest (single field: cluster_id).
_STARTCLUSTERREQUEST = _descriptor.Descriptor(
  name='StartClusterRequest',
  full_name='yandex.cloud.mdb.mysql.v1.StartClusterRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.StartClusterRequest.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=6141,
  serialized_end=6196,
)
# Message descriptor for StartClusterMetadata (operation metadata;
# single field: cluster_id).
_STARTCLUSTERMETADATA = _descriptor.Descriptor(
  name='StartClusterMetadata',
  full_name='yandex.cloud.mdb.mysql.v1.StartClusterMetadata',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.StartClusterMetadata.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=6198,
  serialized_end=6240,
)
# Message descriptor for StopClusterRequest (single field: cluster_id).
_STOPCLUSTERREQUEST = _descriptor.Descriptor(
  name='StopClusterRequest',
  full_name='yandex.cloud.mdb.mysql.v1.StopClusterRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.StopClusterRequest.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=6242,
  serialized_end=6296,
)
# Message descriptor for StopClusterMetadata (operation metadata;
# single field: cluster_id).
_STOPCLUSTERMETADATA = _descriptor.Descriptor(
  name='StopClusterMetadata',
  full_name='yandex.cloud.mdb.mysql.v1.StopClusterMetadata',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.StopClusterMetadata.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=6298,
  serialized_end=6339,
)
# Message descriptor for MoveClusterRequest
# (fields: cluster_id, destination_folder_id).
_MOVECLUSTERREQUEST = _descriptor.Descriptor(
  name='MoveClusterRequest',
  full_name='yandex.cloud.mdb.mysql.v1.MoveClusterRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.MoveClusterRequest.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='destination_folder_id', full_name='yandex.cloud.mdb.mysql.v1.MoveClusterRequest.destination_folder_id', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=6341,
  serialized_end=6440,
)
# Message descriptor for MoveClusterMetadata (operation metadata;
# fields: cluster_id, source_folder_id, destination_folder_id).
_MOVECLUSTERMETADATA = _descriptor.Descriptor(
  name='MoveClusterMetadata',
  full_name='yandex.cloud.mdb.mysql.v1.MoveClusterMetadata',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.MoveClusterMetadata.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='source_folder_id', full_name='yandex.cloud.mdb.mysql.v1.MoveClusterMetadata.source_folder_id', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='destination_folder_id', full_name='yandex.cloud.mdb.mysql.v1.MoveClusterMetadata.destination_folder_id', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=6442,
  serialized_end=6540,
)
# Message descriptor for UpdateClusterHostsRequest
# (fields: cluster_id, update_host_specs).
_UPDATECLUSTERHOSTSREQUEST = _descriptor.Descriptor(
  name='UpdateClusterHostsRequest',
  full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterHostsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterHostsRequest.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='update_host_specs', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterHostsRequest.update_host_specs', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\202\3101\002>0', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=6543,
  serialized_end=6682,
)
# Message descriptor for UpdateClusterHostsMetadata (operation metadata;
# fields: cluster_id, host_names).
_UPDATECLUSTERHOSTSMETADATA = _descriptor.Descriptor(
  name='UpdateClusterHostsMetadata',
  full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterHostsMetadata',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterHostsMetadata.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='host_names', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterHostsMetadata.host_names', index=1,
      number=2, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=6684,
  serialized_end=6752,
)
# Message descriptor for UpdateHostSpec (fields: host_name,
# replication_source, update_mask, backup_priority, assign_public_ip,
# priority).  The serialized_options byte strings presumably encode
# yandex.cloud validation annotations (e.g. value range "0-100") --
# confirm against the source .proto.
_UPDATEHOSTSPEC = _descriptor.Descriptor(
  name='UpdateHostSpec',
  full_name='yandex.cloud.mdb.mysql.v1.UpdateHostSpec',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='host_name', full_name='yandex.cloud.mdb.mysql.v1.UpdateHostSpec.host_name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='replication_source', full_name='yandex.cloud.mdb.mysql.v1.UpdateHostSpec.replication_source', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='update_mask', full_name='yandex.cloud.mdb.mysql.v1.UpdateHostSpec.update_mask', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='backup_priority', full_name='yandex.cloud.mdb.mysql.v1.UpdateHostSpec.backup_priority', index=3,
      number=4, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\372\3071\0050-100', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='assign_public_ip', full_name='yandex.cloud.mdb.mysql.v1.UpdateHostSpec.assign_public_ip', index=4,
      number=5, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='priority', full_name='yandex.cloud.mdb.mysql.v1.UpdateHostSpec.priority', index=5,
      number=6, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\372\3071\0050-100', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=6755,
  serialized_end=6964,
)
# --- HostSpec message descriptor -------------------------------------------
# NOTE(review): protoc-generated code — do not edit by hand; regenerate from
# the cluster_service.proto file instead.
# HostSpec is the configuration for creating a MySQL host: placement
# (zone_id, subnet_id), the public-IP flag, an optional replication source,
# and backup/master priorities.
# Field type codes are FieldDescriptorProto.Type values: 3=INT64, 8=BOOL,
# 9=STRING. serialized_start/end are byte offsets into this file's
# serialized FileDescriptorProto.
_HOSTSPEC = _descriptor.Descriptor(
  name='HostSpec',
  full_name='yandex.cloud.mdb.mysql.v1.HostSpec',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='zone_id', full_name='yandex.cloud.mdb.mysql.v1.HostSpec.zone_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      # serialized_options carries serialized custom field options — the
      # embedded "<=50" looks like a Yandex Cloud length constraint; confirm
      # against the .proto annotations.
      serialized_options=b'\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='subnet_id', full_name='yandex.cloud.mdb.mysql.v1.HostSpec.subnet_id', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='assign_public_ip', full_name='yandex.cloud.mdb.mysql.v1.HostSpec.assign_public_ip', index=2,
      number=3, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='replication_source', full_name='yandex.cloud.mdb.mysql.v1.HostSpec.replication_source', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='backup_priority', full_name='yandex.cloud.mdb.mysql.v1.HostSpec.backup_priority', index=4,
      number=5, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      # "0-100" here is presumably a range-constraint option on the priority
      # value — TODO confirm against the .proto.
      serialized_options=b'\372\3071\0050-100', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='priority', full_name='yandex.cloud.mdb.mysql.v1.HostSpec.priority', index=5,
      number=6, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\372\3071\0050-100', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=6967,
  serialized_end=7152,
)
# --- ConfigSpec message descriptor -----------------------------------------
# NOTE(review): protoc-generated code — do not edit by hand; regenerate from
# the cluster_service.proto file instead.
# ConfigSpec bundles cluster-wide configuration: MySQL version string,
# version-specific server settings (a 'mysql_config' oneof with 5.7 and 8.0
# variants, note the explicit camelCase json_name overrides), resource
# preset, backup window, access settings and performance diagnostics.
# Field type codes: 9=STRING, 11=MESSAGE.
_CONFIGSPEC = _descriptor.Descriptor(
  name='ConfigSpec',
  full_name='yandex.cloud.mdb.mysql.v1.ConfigSpec',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='version', full_name='yandex.cloud.mdb.mysql.v1.ConfigSpec.version', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='mysql_config_5_7', full_name='yandex.cloud.mdb.mysql.v1.ConfigSpec.mysql_config_5_7', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='mysqlConfig_5_7', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      # Note the non-sequential field number (6): 8.0 support was added after
      # fields 3-5 already existed.
      name='mysql_config_8_0', full_name='yandex.cloud.mdb.mysql.v1.ConfigSpec.mysql_config_8_0', index=2,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='mysqlConfig_8_0', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='resources', full_name='yandex.cloud.mdb.mysql.v1.ConfigSpec.resources', index=3,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='backup_window_start', full_name='yandex.cloud.mdb.mysql.v1.ConfigSpec.backup_window_start', index=4,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='access', full_name='yandex.cloud.mdb.mysql.v1.ConfigSpec.access', index=5,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='performance_diagnostics', full_name='yandex.cloud.mdb.mysql.v1.ConfigSpec.performance_diagnostics', index=6,
      number=7, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
    # At most one of mysql_config_5_7 / mysql_config_8_0 may be set; the
    # member fields are attached below, after all descriptors exist.
    _descriptor.OneofDescriptor(
      name='mysql_config', full_name='yandex.cloud.mdb.mysql.v1.ConfigSpec.mysql_config',
      index=0, containing_type=None,
      create_key=_descriptor._internal_create_key,
      fields=[]),
  ],
  serialized_start=7155,
  serialized_end=7635,
)
# --- Descriptor cross-linking ----------------------------------------------
# NOTE(review): protoc-generated boilerplate. The Descriptor objects above
# are built with message_type/enum_type/containing_type left as None; these
# assignments resolve those references (including types imported from the
# cluster/database/user/backup/maintenance modules and from well-known types
# such as Timestamp and FieldMask) once every descriptor exists.
_LISTCLUSTERSRESPONSE.fields_by_name['clusters'].message_type = yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__pb2._CLUSTER
_CREATECLUSTERREQUEST_LABELSENTRY.containing_type = _CREATECLUSTERREQUEST
_CREATECLUSTERREQUEST.fields_by_name['labels'].message_type = _CREATECLUSTERREQUEST_LABELSENTRY
_CREATECLUSTERREQUEST.fields_by_name['environment'].enum_type = yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__pb2._CLUSTER_ENVIRONMENT
_CREATECLUSTERREQUEST.fields_by_name['config_spec'].message_type = _CONFIGSPEC
_CREATECLUSTERREQUEST.fields_by_name['database_specs'].message_type = yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_database__pb2._DATABASESPEC
_CREATECLUSTERREQUEST.fields_by_name['user_specs'].message_type = yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_user__pb2._USERSPEC
_CREATECLUSTERREQUEST.fields_by_name['host_specs'].message_type = _HOSTSPEC
_UPDATECLUSTERREQUEST_LABELSENTRY.containing_type = _UPDATECLUSTERREQUEST
_UPDATECLUSTERREQUEST.fields_by_name['update_mask'].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_UPDATECLUSTERREQUEST.fields_by_name['labels'].message_type = _UPDATECLUSTERREQUEST_LABELSENTRY
_UPDATECLUSTERREQUEST.fields_by_name['config_spec'].message_type = _CONFIGSPEC
_UPDATECLUSTERREQUEST.fields_by_name['maintenance_window'].message_type = yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_maintenance__pb2._MAINTENANCEWINDOW
_RESTORECLUSTERREQUEST_LABELSENTRY.containing_type = _RESTORECLUSTERREQUEST
_RESTORECLUSTERREQUEST.fields_by_name['time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_RESTORECLUSTERREQUEST.fields_by_name['labels'].message_type = _RESTORECLUSTERREQUEST_LABELSENTRY
_RESTORECLUSTERREQUEST.fields_by_name['environment'].enum_type = yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__pb2._CLUSTER_ENVIRONMENT
_RESTORECLUSTERREQUEST.fields_by_name['config_spec'].message_type = _CONFIGSPEC
_RESTORECLUSTERREQUEST.fields_by_name['host_specs'].message_type = _HOSTSPEC
_RESCHEDULEMAINTENANCEREQUEST.fields_by_name['reschedule_type'].enum_type = _RESCHEDULEMAINTENANCEREQUEST_RESCHEDULETYPE
_RESCHEDULEMAINTENANCEREQUEST.fields_by_name['delayed_until'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_RESCHEDULEMAINTENANCEREQUEST_RESCHEDULETYPE.containing_type = _RESCHEDULEMAINTENANCEREQUEST
_RESCHEDULEMAINTENANCEMETADATA.fields_by_name['delayed_until'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_LOGRECORD_MESSAGEENTRY.containing_type = _LOGRECORD
_LOGRECORD.fields_by_name['timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_LOGRECORD.fields_by_name['message'].message_type = _LOGRECORD_MESSAGEENTRY
_LISTCLUSTERLOGSREQUEST.fields_by_name['service_type'].enum_type = _LISTCLUSTERLOGSREQUEST_SERVICETYPE
_LISTCLUSTERLOGSREQUEST.fields_by_name['from_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_LISTCLUSTERLOGSREQUEST.fields_by_name['to_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_LISTCLUSTERLOGSREQUEST_SERVICETYPE.containing_type = _LISTCLUSTERLOGSREQUEST
_LISTCLUSTERLOGSRESPONSE.fields_by_name['logs'].message_type = _LOGRECORD
_STREAMLOGRECORD.fields_by_name['record'].message_type = _LOGRECORD
_STREAMCLUSTERLOGSREQUEST.fields_by_name['service_type'].enum_type = _STREAMCLUSTERLOGSREQUEST_SERVICETYPE
_STREAMCLUSTERLOGSREQUEST.fields_by_name['from_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_STREAMCLUSTERLOGSREQUEST.fields_by_name['to_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_STREAMCLUSTERLOGSREQUEST_SERVICETYPE.containing_type = _STREAMCLUSTERLOGSREQUEST
_LISTCLUSTEROPERATIONSRESPONSE.fields_by_name['operations'].message_type = yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION
_LISTCLUSTERBACKUPSRESPONSE.fields_by_name['backups'].message_type = yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_backup__pb2._BACKUP
_LISTCLUSTERHOSTSRESPONSE.fields_by_name['hosts'].message_type = yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__pb2._HOST
_ADDCLUSTERHOSTSREQUEST.fields_by_name['host_specs'].message_type = _HOSTSPEC
_UPDATECLUSTERHOSTSREQUEST.fields_by_name['update_host_specs'].message_type = _UPDATEHOSTSPEC
_UPDATEHOSTSPEC.fields_by_name['update_mask'].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_CONFIGSPEC.fields_by_name['mysql_config_5_7'].message_type = yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_config_dot_mysql5__7__pb2._MYSQLCONFIG5_7
_CONFIGSPEC.fields_by_name['mysql_config_8_0'].message_type = yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_config_dot_mysql8__0__pb2._MYSQLCONFIG8_0
_CONFIGSPEC.fields_by_name['resources'].message_type = yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__pb2._RESOURCES
_CONFIGSPEC.fields_by_name['backup_window_start'].message_type = google_dot_type_dot_timeofday__pb2._TIMEOFDAY
_CONFIGSPEC.fields_by_name['access'].message_type = yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__pb2._ACCESS
_CONFIGSPEC.fields_by_name['performance_diagnostics'].message_type = yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__pb2._PERFORMANCEDIAGNOSTICS
# Attach both version-specific config fields to the 'mysql_config' oneof and
# point each field back at its containing oneof.
_CONFIGSPEC.oneofs_by_name['mysql_config'].fields.append(
  _CONFIGSPEC.fields_by_name['mysql_config_5_7'])
_CONFIGSPEC.fields_by_name['mysql_config_5_7'].containing_oneof = _CONFIGSPEC.oneofs_by_name['mysql_config']
_CONFIGSPEC.oneofs_by_name['mysql_config'].fields.append(
  _CONFIGSPEC.fields_by_name['mysql_config_8_0'])
_CONFIGSPEC.fields_by_name['mysql_config_8_0'].containing_oneof = _CONFIGSPEC.oneofs_by_name['mysql_config']
# --- File-level message registration ---------------------------------------
# NOTE(review): protoc-generated boilerplate. Expose every top-level message
# descriptor on the file descriptor under its proto name, then register the
# file with the default symbol database so messages can be looked up by
# full name at runtime.
DESCRIPTOR.message_types_by_name['GetClusterRequest'] = _GETCLUSTERREQUEST
DESCRIPTOR.message_types_by_name['ListClustersRequest'] = _LISTCLUSTERSREQUEST
DESCRIPTOR.message_types_by_name['ListClustersResponse'] = _LISTCLUSTERSRESPONSE
DESCRIPTOR.message_types_by_name['CreateClusterRequest'] = _CREATECLUSTERREQUEST
DESCRIPTOR.message_types_by_name['CreateClusterMetadata'] = _CREATECLUSTERMETADATA
DESCRIPTOR.message_types_by_name['UpdateClusterRequest'] = _UPDATECLUSTERREQUEST
DESCRIPTOR.message_types_by_name['UpdateClusterMetadata'] = _UPDATECLUSTERMETADATA
DESCRIPTOR.message_types_by_name['DeleteClusterRequest'] = _DELETECLUSTERREQUEST
DESCRIPTOR.message_types_by_name['DeleteClusterMetadata'] = _DELETECLUSTERMETADATA
DESCRIPTOR.message_types_by_name['BackupClusterRequest'] = _BACKUPCLUSTERREQUEST
DESCRIPTOR.message_types_by_name['BackupClusterMetadata'] = _BACKUPCLUSTERMETADATA
DESCRIPTOR.message_types_by_name['RestoreClusterRequest'] = _RESTORECLUSTERREQUEST
DESCRIPTOR.message_types_by_name['RestoreClusterMetadata'] = _RESTORECLUSTERMETADATA
DESCRIPTOR.message_types_by_name['StartClusterFailoverRequest'] = _STARTCLUSTERFAILOVERREQUEST
DESCRIPTOR.message_types_by_name['StartClusterFailoverMetadata'] = _STARTCLUSTERFAILOVERMETADATA
DESCRIPTOR.message_types_by_name['RescheduleMaintenanceRequest'] = _RESCHEDULEMAINTENANCEREQUEST
DESCRIPTOR.message_types_by_name['RescheduleMaintenanceMetadata'] = _RESCHEDULEMAINTENANCEMETADATA
DESCRIPTOR.message_types_by_name['LogRecord'] = _LOGRECORD
DESCRIPTOR.message_types_by_name['ListClusterLogsRequest'] = _LISTCLUSTERLOGSREQUEST
DESCRIPTOR.message_types_by_name['ListClusterLogsResponse'] = _LISTCLUSTERLOGSRESPONSE
DESCRIPTOR.message_types_by_name['StreamLogRecord'] = _STREAMLOGRECORD
DESCRIPTOR.message_types_by_name['StreamClusterLogsRequest'] = _STREAMCLUSTERLOGSREQUEST
DESCRIPTOR.message_types_by_name['ListClusterOperationsRequest'] = _LISTCLUSTEROPERATIONSREQUEST
DESCRIPTOR.message_types_by_name['ListClusterOperationsResponse'] = _LISTCLUSTEROPERATIONSRESPONSE
DESCRIPTOR.message_types_by_name['ListClusterBackupsRequest'] = _LISTCLUSTERBACKUPSREQUEST
DESCRIPTOR.message_types_by_name['ListClusterBackupsResponse'] = _LISTCLUSTERBACKUPSRESPONSE
DESCRIPTOR.message_types_by_name['ListClusterHostsRequest'] = _LISTCLUSTERHOSTSREQUEST
DESCRIPTOR.message_types_by_name['ListClusterHostsResponse'] = _LISTCLUSTERHOSTSRESPONSE
DESCRIPTOR.message_types_by_name['AddClusterHostsRequest'] = _ADDCLUSTERHOSTSREQUEST
DESCRIPTOR.message_types_by_name['AddClusterHostsMetadata'] = _ADDCLUSTERHOSTSMETADATA
DESCRIPTOR.message_types_by_name['DeleteClusterHostsRequest'] = _DELETECLUSTERHOSTSREQUEST
DESCRIPTOR.message_types_by_name['DeleteClusterHostsMetadata'] = _DELETECLUSTERHOSTSMETADATA
DESCRIPTOR.message_types_by_name['StartClusterRequest'] = _STARTCLUSTERREQUEST
DESCRIPTOR.message_types_by_name['StartClusterMetadata'] = _STARTCLUSTERMETADATA
DESCRIPTOR.message_types_by_name['StopClusterRequest'] = _STOPCLUSTERREQUEST
DESCRIPTOR.message_types_by_name['StopClusterMetadata'] = _STOPCLUSTERMETADATA
DESCRIPTOR.message_types_by_name['MoveClusterRequest'] = _MOVECLUSTERREQUEST
DESCRIPTOR.message_types_by_name['MoveClusterMetadata'] = _MOVECLUSTERMETADATA
DESCRIPTOR.message_types_by_name['UpdateClusterHostsRequest'] = _UPDATECLUSTERHOSTSREQUEST
DESCRIPTOR.message_types_by_name['UpdateClusterHostsMetadata'] = _UPDATECLUSTERHOSTSMETADATA
DESCRIPTOR.message_types_by_name['UpdateHostSpec'] = _UPDATEHOSTSPEC
DESCRIPTOR.message_types_by_name['HostSpec'] = _HOSTSPEC
DESCRIPTOR.message_types_by_name['ConfigSpec'] = _CONFIGSPEC
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# --- Generated message classes ---------------------------------------------
# NOTE(review): protoc-generated boilerplate. Each call below synthesizes a
# concrete Message subclass from its descriptor and registers it with the
# symbol database. Messages with a map field (labels/message) also get a
# nested *Entry class built from the corresponding map-entry descriptor.
# The @@protoc_insertion_point comments are markers consumed by protoc
# plugins — keep them verbatim.
GetClusterRequest = _reflection.GeneratedProtocolMessageType('GetClusterRequest', (_message.Message,), {
  'DESCRIPTOR' : _GETCLUSTERREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.GetClusterRequest)
  })
_sym_db.RegisterMessage(GetClusterRequest)
ListClustersRequest = _reflection.GeneratedProtocolMessageType('ListClustersRequest', (_message.Message,), {
  'DESCRIPTOR' : _LISTCLUSTERSREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.ListClustersRequest)
  })
_sym_db.RegisterMessage(ListClustersRequest)
ListClustersResponse = _reflection.GeneratedProtocolMessageType('ListClustersResponse', (_message.Message,), {
  'DESCRIPTOR' : _LISTCLUSTERSRESPONSE,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.ListClustersResponse)
  })
_sym_db.RegisterMessage(ListClustersResponse)
# CreateClusterRequest carries a map<string, string> labels field, so a
# nested LabelsEntry message class is generated alongside it.
CreateClusterRequest = _reflection.GeneratedProtocolMessageType('CreateClusterRequest', (_message.Message,), {
  'LabelsEntry' : _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), {
    'DESCRIPTOR' : _CREATECLUSTERREQUEST_LABELSENTRY,
    '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
    # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.CreateClusterRequest.LabelsEntry)
    })
  ,
  'DESCRIPTOR' : _CREATECLUSTERREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.CreateClusterRequest)
  })
_sym_db.RegisterMessage(CreateClusterRequest)
_sym_db.RegisterMessage(CreateClusterRequest.LabelsEntry)
CreateClusterMetadata = _reflection.GeneratedProtocolMessageType('CreateClusterMetadata', (_message.Message,), {
  'DESCRIPTOR' : _CREATECLUSTERMETADATA,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.CreateClusterMetadata)
  })
_sym_db.RegisterMessage(CreateClusterMetadata)
UpdateClusterRequest = _reflection.GeneratedProtocolMessageType('UpdateClusterRequest', (_message.Message,), {
  'LabelsEntry' : _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), {
    'DESCRIPTOR' : _UPDATECLUSTERREQUEST_LABELSENTRY,
    '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
    # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.UpdateClusterRequest.LabelsEntry)
    })
  ,
  'DESCRIPTOR' : _UPDATECLUSTERREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.UpdateClusterRequest)
  })
_sym_db.RegisterMessage(UpdateClusterRequest)
_sym_db.RegisterMessage(UpdateClusterRequest.LabelsEntry)
UpdateClusterMetadata = _reflection.GeneratedProtocolMessageType('UpdateClusterMetadata', (_message.Message,), {
  'DESCRIPTOR' : _UPDATECLUSTERMETADATA,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.UpdateClusterMetadata)
  })
_sym_db.RegisterMessage(UpdateClusterMetadata)
DeleteClusterRequest = _reflection.GeneratedProtocolMessageType('DeleteClusterRequest', (_message.Message,), {
  'DESCRIPTOR' : _DELETECLUSTERREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.DeleteClusterRequest)
  })
_sym_db.RegisterMessage(DeleteClusterRequest)
DeleteClusterMetadata = _reflection.GeneratedProtocolMessageType('DeleteClusterMetadata', (_message.Message,), {
  'DESCRIPTOR' : _DELETECLUSTERMETADATA,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.DeleteClusterMetadata)
  })
_sym_db.RegisterMessage(DeleteClusterMetadata)
BackupClusterRequest = _reflection.GeneratedProtocolMessageType('BackupClusterRequest', (_message.Message,), {
  'DESCRIPTOR' : _BACKUPCLUSTERREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.BackupClusterRequest)
  })
_sym_db.RegisterMessage(BackupClusterRequest)
BackupClusterMetadata = _reflection.GeneratedProtocolMessageType('BackupClusterMetadata', (_message.Message,), {
  'DESCRIPTOR' : _BACKUPCLUSTERMETADATA,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.BackupClusterMetadata)
  })
_sym_db.RegisterMessage(BackupClusterMetadata)
RestoreClusterRequest = _reflection.GeneratedProtocolMessageType('RestoreClusterRequest', (_message.Message,), {
  'LabelsEntry' : _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), {
    'DESCRIPTOR' : _RESTORECLUSTERREQUEST_LABELSENTRY,
    '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
    # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.LabelsEntry)
    })
  ,
  'DESCRIPTOR' : _RESTORECLUSTERREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.RestoreClusterRequest)
  })
_sym_db.RegisterMessage(RestoreClusterRequest)
_sym_db.RegisterMessage(RestoreClusterRequest.LabelsEntry)
RestoreClusterMetadata = _reflection.GeneratedProtocolMessageType('RestoreClusterMetadata', (_message.Message,), {
  'DESCRIPTOR' : _RESTORECLUSTERMETADATA,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.RestoreClusterMetadata)
  })
_sym_db.RegisterMessage(RestoreClusterMetadata)
StartClusterFailoverRequest = _reflection.GeneratedProtocolMessageType('StartClusterFailoverRequest', (_message.Message,), {
  'DESCRIPTOR' : _STARTCLUSTERFAILOVERREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.StartClusterFailoverRequest)
  })
_sym_db.RegisterMessage(StartClusterFailoverRequest)
StartClusterFailoverMetadata = _reflection.GeneratedProtocolMessageType('StartClusterFailoverMetadata', (_message.Message,), {
  'DESCRIPTOR' : _STARTCLUSTERFAILOVERMETADATA,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.StartClusterFailoverMetadata)
  })
_sym_db.RegisterMessage(StartClusterFailoverMetadata)
RescheduleMaintenanceRequest = _reflection.GeneratedProtocolMessageType('RescheduleMaintenanceRequest', (_message.Message,), {
  'DESCRIPTOR' : _RESCHEDULEMAINTENANCEREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.RescheduleMaintenanceRequest)
  })
_sym_db.RegisterMessage(RescheduleMaintenanceRequest)
RescheduleMaintenanceMetadata = _reflection.GeneratedProtocolMessageType('RescheduleMaintenanceMetadata', (_message.Message,), {
  'DESCRIPTOR' : _RESCHEDULEMAINTENANCEMETADATA,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.RescheduleMaintenanceMetadata)
  })
_sym_db.RegisterMessage(RescheduleMaintenanceMetadata)
# LogRecord's map field 'message' gets a nested MessageEntry class.
LogRecord = _reflection.GeneratedProtocolMessageType('LogRecord', (_message.Message,), {
  'MessageEntry' : _reflection.GeneratedProtocolMessageType('MessageEntry', (_message.Message,), {
    'DESCRIPTOR' : _LOGRECORD_MESSAGEENTRY,
    '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
    # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.LogRecord.MessageEntry)
    })
  ,
  'DESCRIPTOR' : _LOGRECORD,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.LogRecord)
  })
_sym_db.RegisterMessage(LogRecord)
_sym_db.RegisterMessage(LogRecord.MessageEntry)
ListClusterLogsRequest = _reflection.GeneratedProtocolMessageType('ListClusterLogsRequest', (_message.Message,), {
  'DESCRIPTOR' : _LISTCLUSTERLOGSREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.ListClusterLogsRequest)
  })
_sym_db.RegisterMessage(ListClusterLogsRequest)
ListClusterLogsResponse = _reflection.GeneratedProtocolMessageType('ListClusterLogsResponse', (_message.Message,), {
  'DESCRIPTOR' : _LISTCLUSTERLOGSRESPONSE,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.ListClusterLogsResponse)
  })
_sym_db.RegisterMessage(ListClusterLogsResponse)
StreamLogRecord = _reflection.GeneratedProtocolMessageType('StreamLogRecord', (_message.Message,), {
  'DESCRIPTOR' : _STREAMLOGRECORD,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.StreamLogRecord)
  })
_sym_db.RegisterMessage(StreamLogRecord)
StreamClusterLogsRequest = _reflection.GeneratedProtocolMessageType('StreamClusterLogsRequest', (_message.Message,), {
  'DESCRIPTOR' : _STREAMCLUSTERLOGSREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.StreamClusterLogsRequest)
  })
_sym_db.RegisterMessage(StreamClusterLogsRequest)
ListClusterOperationsRequest = _reflection.GeneratedProtocolMessageType('ListClusterOperationsRequest', (_message.Message,), {
  'DESCRIPTOR' : _LISTCLUSTEROPERATIONSREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.ListClusterOperationsRequest)
  })
_sym_db.RegisterMessage(ListClusterOperationsRequest)
ListClusterOperationsResponse = _reflection.GeneratedProtocolMessageType('ListClusterOperationsResponse', (_message.Message,), {
  'DESCRIPTOR' : _LISTCLUSTEROPERATIONSRESPONSE,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.ListClusterOperationsResponse)
  })
_sym_db.RegisterMessage(ListClusterOperationsResponse)
ListClusterBackupsRequest = _reflection.GeneratedProtocolMessageType('ListClusterBackupsRequest', (_message.Message,), {
  'DESCRIPTOR' : _LISTCLUSTERBACKUPSREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.ListClusterBackupsRequest)
  })
_sym_db.RegisterMessage(ListClusterBackupsRequest)
ListClusterBackupsResponse = _reflection.GeneratedProtocolMessageType('ListClusterBackupsResponse', (_message.Message,), {
  'DESCRIPTOR' : _LISTCLUSTERBACKUPSRESPONSE,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.ListClusterBackupsResponse)
  })
_sym_db.RegisterMessage(ListClusterBackupsResponse)
ListClusterHostsRequest = _reflection.GeneratedProtocolMessageType('ListClusterHostsRequest', (_message.Message,), {
  'DESCRIPTOR' : _LISTCLUSTERHOSTSREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.ListClusterHostsRequest)
  })
_sym_db.RegisterMessage(ListClusterHostsRequest)
ListClusterHostsResponse = _reflection.GeneratedProtocolMessageType('ListClusterHostsResponse', (_message.Message,), {
  'DESCRIPTOR' : _LISTCLUSTERHOSTSRESPONSE,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.ListClusterHostsResponse)
  })
_sym_db.RegisterMessage(ListClusterHostsResponse)
AddClusterHostsRequest = _reflection.GeneratedProtocolMessageType('AddClusterHostsRequest', (_message.Message,), {
  'DESCRIPTOR' : _ADDCLUSTERHOSTSREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.AddClusterHostsRequest)
  })
_sym_db.RegisterMessage(AddClusterHostsRequest)
AddClusterHostsMetadata = _reflection.GeneratedProtocolMessageType('AddClusterHostsMetadata', (_message.Message,), {
  'DESCRIPTOR' : _ADDCLUSTERHOSTSMETADATA,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.AddClusterHostsMetadata)
  })
_sym_db.RegisterMessage(AddClusterHostsMetadata)
DeleteClusterHostsRequest = _reflection.GeneratedProtocolMessageType('DeleteClusterHostsRequest', (_message.Message,), {
  'DESCRIPTOR' : _DELETECLUSTERHOSTSREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.DeleteClusterHostsRequest)
  })
_sym_db.RegisterMessage(DeleteClusterHostsRequest)
DeleteClusterHostsMetadata = _reflection.GeneratedProtocolMessageType('DeleteClusterHostsMetadata', (_message.Message,), {
  'DESCRIPTOR' : _DELETECLUSTERHOSTSMETADATA,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.DeleteClusterHostsMetadata)
  })
_sym_db.RegisterMessage(DeleteClusterHostsMetadata)
StartClusterRequest = _reflection.GeneratedProtocolMessageType('StartClusterRequest', (_message.Message,), {
  'DESCRIPTOR' : _STARTCLUSTERREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.StartClusterRequest)
  })
_sym_db.RegisterMessage(StartClusterRequest)
StartClusterMetadata = _reflection.GeneratedProtocolMessageType('StartClusterMetadata', (_message.Message,), {
  'DESCRIPTOR' : _STARTCLUSTERMETADATA,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.StartClusterMetadata)
  })
_sym_db.RegisterMessage(StartClusterMetadata)
StopClusterRequest = _reflection.GeneratedProtocolMessageType('StopClusterRequest', (_message.Message,), {
  'DESCRIPTOR' : _STOPCLUSTERREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.StopClusterRequest)
  })
_sym_db.RegisterMessage(StopClusterRequest)
StopClusterMetadata = _reflection.GeneratedProtocolMessageType('StopClusterMetadata', (_message.Message,), {
  'DESCRIPTOR' : _STOPCLUSTERMETADATA,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.StopClusterMetadata)
  })
_sym_db.RegisterMessage(StopClusterMetadata)
MoveClusterRequest = _reflection.GeneratedProtocolMessageType('MoveClusterRequest', (_message.Message,), {
  'DESCRIPTOR' : _MOVECLUSTERREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.MoveClusterRequest)
  })
_sym_db.RegisterMessage(MoveClusterRequest)
MoveClusterMetadata = _reflection.GeneratedProtocolMessageType('MoveClusterMetadata', (_message.Message,), {
  'DESCRIPTOR' : _MOVECLUSTERMETADATA,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.MoveClusterMetadata)
  })
_sym_db.RegisterMessage(MoveClusterMetadata)
UpdateClusterHostsRequest = _reflection.GeneratedProtocolMessageType('UpdateClusterHostsRequest', (_message.Message,), {
  'DESCRIPTOR' : _UPDATECLUSTERHOSTSREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.UpdateClusterHostsRequest)
  })
_sym_db.RegisterMessage(UpdateClusterHostsRequest)
UpdateClusterHostsMetadata = _reflection.GeneratedProtocolMessageType('UpdateClusterHostsMetadata', (_message.Message,), {
  'DESCRIPTOR' : _UPDATECLUSTERHOSTSMETADATA,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.UpdateClusterHostsMetadata)
  })
_sym_db.RegisterMessage(UpdateClusterHostsMetadata)
UpdateHostSpec = _reflection.GeneratedProtocolMessageType('UpdateHostSpec', (_message.Message,), {
  'DESCRIPTOR' : _UPDATEHOSTSPEC,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.UpdateHostSpec)
  })
_sym_db.RegisterMessage(UpdateHostSpec)
HostSpec = _reflection.GeneratedProtocolMessageType('HostSpec', (_message.Message,), {
  'DESCRIPTOR' : _HOSTSPEC,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.HostSpec)
  })
_sym_db.RegisterMessage(HostSpec)
ConfigSpec = _reflection.GeneratedProtocolMessageType('ConfigSpec', (_message.Message,), {
  'DESCRIPTOR' : _CONFIGSPEC,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.ConfigSpec)
  })
_sym_db.RegisterMessage(ConfigSpec)
DESCRIPTOR._options = None
_GETCLUSTERREQUEST.fields_by_name['cluster_id']._options = None
_LISTCLUSTERSREQUEST.fields_by_name['folder_id']._options = None
_LISTCLUSTERSREQUEST.fields_by_name['page_size']._options = None
_LISTCLUSTERSREQUEST.fields_by_name['page_token']._options = None
_LISTCLUSTERSREQUEST.fields_by_name['filter']._options = None
_CREATECLUSTERREQUEST_LABELSENTRY._options = None
_CREATECLUSTERREQUEST.fields_by_name['folder_id']._options = None
_CREATECLUSTERREQUEST.fields_by_name['name']._options = None
_CREATECLUSTERREQUEST.fields_by_name['description']._options = None
_CREATECLUSTERREQUEST.fields_by_name['labels']._options = None
_CREATECLUSTERREQUEST.fields_by_name['network_id']._options = None
_UPDATECLUSTERREQUEST_LABELSENTRY._options = None
_UPDATECLUSTERREQUEST.fields_by_name['cluster_id']._options = None
_UPDATECLUSTERREQUEST.fields_by_name['description']._options = None
_UPDATECLUSTERREQUEST.fields_by_name['labels']._options = None
_UPDATECLUSTERREQUEST.fields_by_name['name']._options = None
_DELETECLUSTERREQUEST.fields_by_name['cluster_id']._options = None
_BACKUPCLUSTERREQUEST.fields_by_name['cluster_id']._options = None
_RESTORECLUSTERREQUEST_LABELSENTRY._options = None
_RESTORECLUSTERREQUEST.fields_by_name['backup_id']._options = None
_RESTORECLUSTERREQUEST.fields_by_name['time']._options = None
_RESTORECLUSTERREQUEST.fields_by_name['name']._options = None
_RESTORECLUSTERREQUEST.fields_by_name['description']._options = None
_RESTORECLUSTERREQUEST.fields_by_name['labels']._options = None
_RESTORECLUSTERREQUEST.fields_by_name['network_id']._options = None
_RESTORECLUSTERREQUEST.fields_by_name['folder_id']._options = None
_STARTCLUSTERFAILOVERREQUEST.fields_by_name['cluster_id']._options = None
_STARTCLUSTERFAILOVERREQUEST.fields_by_name['host_name']._options = None
_RESCHEDULEMAINTENANCEREQUEST.fields_by_name['cluster_id']._options = None
_RESCHEDULEMAINTENANCEREQUEST.fields_by_name['reschedule_type']._options = None
_LOGRECORD_MESSAGEENTRY._options = None
_LISTCLUSTERLOGSREQUEST.fields_by_name['cluster_id']._options = None
_LISTCLUSTERLOGSREQUEST.fields_by_name['page_size']._options = None
_LISTCLUSTERLOGSREQUEST.fields_by_name['page_token']._options = None
_STREAMCLUSTERLOGSREQUEST.fields_by_name['cluster_id']._options = None
_STREAMCLUSTERLOGSREQUEST.fields_by_name['record_token']._options = None
_STREAMCLUSTERLOGSREQUEST.fields_by_name['filter']._options = None
_LISTCLUSTEROPERATIONSREQUEST.fields_by_name['cluster_id']._options = None
_LISTCLUSTEROPERATIONSREQUEST.fields_by_name['page_size']._options = None
_LISTCLUSTEROPERATIONSREQUEST.fields_by_name['page_token']._options = None
_LISTCLUSTERBACKUPSREQUEST.fields_by_name['cluster_id']._options = None
_LISTCLUSTERBACKUPSREQUEST.fields_by_name['page_size']._options = None
_LISTCLUSTERBACKUPSREQUEST.fields_by_name['page_token']._options = None
_LISTCLUSTERHOSTSREQUEST.fields_by_name['cluster_id']._options = None
_LISTCLUSTERHOSTSREQUEST.fields_by_name['page_size']._options = None
_LISTCLUSTERHOSTSREQUEST.fields_by_name['page_token']._options = None
_ADDCLUSTERHOSTSREQUEST.fields_by_name['cluster_id']._options = None
_ADDCLUSTERHOSTSREQUEST.fields_by_name['host_specs']._options = None
_DELETECLUSTERHOSTSREQUEST.fields_by_name['cluster_id']._options = None
_DELETECLUSTERHOSTSREQUEST.fields_by_name['host_names']._options = None
_STARTCLUSTERREQUEST.fields_by_name['cluster_id']._options = None
_STOPCLUSTERREQUEST.fields_by_name['cluster_id']._options = None
_MOVECLUSTERREQUEST.fields_by_name['cluster_id']._options = None
_MOVECLUSTERREQUEST.fields_by_name['destination_folder_id']._options = None
_UPDATECLUSTERHOSTSREQUEST.fields_by_name['cluster_id']._options = None
_UPDATECLUSTERHOSTSREQUEST.fields_by_name['update_host_specs']._options = None
_UPDATEHOSTSPEC.fields_by_name['host_name']._options = None
_UPDATEHOSTSPEC.fields_by_name['backup_priority']._options = None
_UPDATEHOSTSPEC.fields_by_name['priority']._options = None
_HOSTSPEC.fields_by_name['zone_id']._options = None
_HOSTSPEC.fields_by_name['subnet_id']._options = None
_HOSTSPEC.fields_by_name['backup_priority']._options = None
_HOSTSPEC.fields_by_name['priority']._options = None
_CLUSTERSERVICE = _descriptor.ServiceDescriptor(
name='ClusterService',
full_name='yandex.cloud.mdb.mysql.v1.ClusterService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=7638,
serialized_end=11424,
methods=[
_descriptor.MethodDescriptor(
name='Get',
full_name='yandex.cloud.mdb.mysql.v1.ClusterService.Get',
index=0,
containing_service=None,
input_type=_GETCLUSTERREQUEST,
output_type=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__pb2._CLUSTER,
serialized_options=b'\202\323\344\223\002)\022\'/managed-mysql/v1/clusters/{cluster_id}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='List',
full_name='yandex.cloud.mdb.mysql.v1.ClusterService.List',
index=1,
containing_service=None,
input_type=_LISTCLUSTERSREQUEST,
output_type=_LISTCLUSTERSRESPONSE,
serialized_options=b'\202\323\344\223\002\034\022\032/managed-mysql/v1/clusters',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Create',
full_name='yandex.cloud.mdb.mysql.v1.ClusterService.Create',
index=2,
containing_service=None,
input_type=_CREATECLUSTERREQUEST,
output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
serialized_options=b'\202\323\344\223\002\037\"\032/managed-mysql/v1/clusters:\001*\262\322* \n\025CreateClusterMetadata\022\007Cluster',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Update',
full_name='yandex.cloud.mdb.mysql.v1.ClusterService.Update',
index=3,
containing_service=None,
input_type=_UPDATECLUSTERREQUEST,
output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
serialized_options=b'\202\323\344\223\002,2\'/managed-mysql/v1/clusters/{cluster_id}:\001*\262\322* \n\025UpdateClusterMetadata\022\007Cluster',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Delete',
full_name='yandex.cloud.mdb.mysql.v1.ClusterService.Delete',
index=4,
containing_service=None,
input_type=_DELETECLUSTERREQUEST,
output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
serialized_options=b'\202\323\344\223\002)*\'/managed-mysql/v1/clusters/{cluster_id}\262\322*.\n\025DeleteClusterMetadata\022\025google.protobuf.Empty',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Start',
full_name='yandex.cloud.mdb.mysql.v1.ClusterService.Start',
index=5,
containing_service=None,
input_type=_STARTCLUSTERREQUEST,
output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
serialized_options=b'\202\323\344\223\002/\"-/managed-mysql/v1/clusters/{cluster_id}:start\262\322*\037\n\024StartClusterMetadata\022\007Cluster',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Stop',
full_name='yandex.cloud.mdb.mysql.v1.ClusterService.Stop',
index=6,
containing_service=None,
input_type=_STOPCLUSTERREQUEST,
output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
serialized_options=b'\202\323\344\223\002.\",/managed-mysql/v1/clusters/{cluster_id}:stop\262\322*\036\n\023StopClusterMetadata\022\007Cluster',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Move',
full_name='yandex.cloud.mdb.mysql.v1.ClusterService.Move',
index=7,
containing_service=None,
input_type=_MOVECLUSTERREQUEST,
output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
serialized_options=b'\202\323\344\223\0021\",/managed-mysql/v1/clusters/{cluster_id}:move:\001*\262\322*\036\n\023MoveClusterMetadata\022\007Cluster',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Backup',
full_name='yandex.cloud.mdb.mysql.v1.ClusterService.Backup',
index=8,
containing_service=None,
input_type=_BACKUPCLUSTERREQUEST,
output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
serialized_options=b'\202\323\344\223\0020\"./managed-mysql/v1/clusters/{cluster_id}:backup\262\322* \n\025BackupClusterMetadata\022\007Cluster',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Restore',
full_name='yandex.cloud.mdb.mysql.v1.ClusterService.Restore',
index=9,
containing_service=None,
input_type=_RESTORECLUSTERREQUEST,
output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
serialized_options=b'\202\323\344\223\002\'\"\"/managed-mysql/v1/clusters:restore:\001*\262\322*!\n\026RestoreClusterMetadata\022\007Cluster',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='RescheduleMaintenance',
full_name='yandex.cloud.mdb.mysql.v1.ClusterService.RescheduleMaintenance',
index=10,
containing_service=None,
input_type=_RESCHEDULEMAINTENANCEREQUEST,
output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
serialized_options=b'\202\323\344\223\002B\"=/managed-mysql/v1/clusters/{cluster_id}:rescheduleMaintenance:\001*\262\322*(\n\035RescheduleMaintenanceMetadata\022\007Cluster',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='StartFailover',
full_name='yandex.cloud.mdb.mysql.v1.ClusterService.StartFailover',
index=11,
containing_service=None,
input_type=_STARTCLUSTERFAILOVERREQUEST,
output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
serialized_options=b'\202\323\344\223\002:\"5/managed-mysql/v1/clusters/{cluster_id}:startFailover:\001*\262\322*\'\n\034StartClusterFailoverMetadata\022\007Cluster',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='ListLogs',
full_name='yandex.cloud.mdb.mysql.v1.ClusterService.ListLogs',
index=12,
containing_service=None,
input_type=_LISTCLUSTERLOGSREQUEST,
output_type=_LISTCLUSTERLOGSRESPONSE,
serialized_options=b'\202\323\344\223\002.\022,/managed-mysql/v1/clusters/{cluster_id}:logs',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='StreamLogs',
full_name='yandex.cloud.mdb.mysql.v1.ClusterService.StreamLogs',
index=13,
containing_service=None,
input_type=_STREAMCLUSTERLOGSREQUEST,
output_type=_STREAMLOGRECORD,
serialized_options=b'\202\323\344\223\0025\0223/managed-mysql/v1/clusters/{cluster_id}:stream_logs',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='ListOperations',
full_name='yandex.cloud.mdb.mysql.v1.ClusterService.ListOperations',
index=14,
containing_service=None,
input_type=_LISTCLUSTEROPERATIONSREQUEST,
output_type=_LISTCLUSTEROPERATIONSRESPONSE,
serialized_options=b'\202\323\344\223\0024\0222/managed-mysql/v1/clusters/{cluster_id}/operations',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='ListBackups',
full_name='yandex.cloud.mdb.mysql.v1.ClusterService.ListBackups',
index=15,
containing_service=None,
input_type=_LISTCLUSTERBACKUPSREQUEST,
output_type=_LISTCLUSTERBACKUPSRESPONSE,
serialized_options=b'\202\323\344\223\0021\022//managed-mysql/v1/clusters/{cluster_id}/backups',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='ListHosts',
full_name='yandex.cloud.mdb.mysql.v1.ClusterService.ListHosts',
index=16,
containing_service=None,
input_type=_LISTCLUSTERHOSTSREQUEST,
output_type=_LISTCLUSTERHOSTSRESPONSE,
serialized_options=b'\202\323\344\223\002/\022-/managed-mysql/v1/clusters/{cluster_id}/hosts',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='AddHosts',
full_name='yandex.cloud.mdb.mysql.v1.ClusterService.AddHosts',
index=17,
containing_service=None,
input_type=_ADDCLUSTERHOSTSREQUEST,
output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
serialized_options=b'\202\323\344\223\002>\"9/managed-mysql/v1/clusters/{cluster_id}/hosts:batchCreate:\001*\262\322*0\n\027AddClusterHostsMetadata\022\025google.protobuf.Empty',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='UpdateHosts',
full_name='yandex.cloud.mdb.mysql.v1.ClusterService.UpdateHosts',
index=18,
containing_service=None,
input_type=_UPDATECLUSTERHOSTSREQUEST,
output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
serialized_options=b'\202\323\344\223\002>\"9/managed-mysql/v1/clusters/{cluster_id}/hosts:batchUpdate:\001*\262\322*3\n\032UpdateClusterHostsMetadata\022\025google.protobuf.Empty',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='DeleteHosts',
full_name='yandex.cloud.mdb.mysql.v1.ClusterService.DeleteHosts',
index=19,
containing_service=None,
input_type=_DELETECLUSTERHOSTSREQUEST,
output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
serialized_options=b'\202\323\344\223\002>\"9/managed-mysql/v1/clusters/{cluster_id}/hosts:batchDelete:\001*\262\322*3\n\032DeleteClusterHostsMetadata\022\025google.protobuf.Empty',
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_CLUSTERSERVICE)
DESCRIPTOR.services_by_name['ClusterService'] = _CLUSTERSERVICE
# @@protoc_insertion_point(module_scope)
| 50.562355 | 17,753 | 0.775139 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from google.type import timeofday_pb2 as google_dot_type_dot_timeofday__pb2
from yandex.cloud.api import operation_pb2 as yandex_dot_cloud_dot_api_dot_operation__pb2
from yandex.cloud.operation import operation_pb2 as yandex_dot_cloud_dot_operation_dot_operation__pb2
from yandex.cloud import validation_pb2 as yandex_dot_cloud_dot_validation__pb2
from yandex.cloud.mdb.mysql.v1 import backup_pb2 as yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_backup__pb2
from yandex.cloud.mdb.mysql.v1 import cluster_pb2 as yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__pb2
from yandex.cloud.mdb.mysql.v1 import database_pb2 as yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_database__pb2
from yandex.cloud.mdb.mysql.v1 import user_pb2 as yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_user__pb2
from yandex.cloud.mdb.mysql.v1.config import mysql5_7_pb2 as yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_config_dot_mysql5__7__pb2
from yandex.cloud.mdb.mysql.v1.config import mysql8_0_pb2 as yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_config_dot_mysql8__0__pb2
from yandex.cloud.mdb.mysql.v1 import maintenance_pb2 as yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_maintenance__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='yandex/cloud/mdb/mysql/v1/cluster_service.proto',
package='yandex.cloud.mdb.mysql.v1',
syntax='proto3',
serialized_options=b'\n\035yandex.cloud.api.mdb.mysql.v1ZCgithub.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mysql/v1;mysql',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n/yandex/cloud/mdb/mysql/v1/cluster_service.proto\x12\x19yandex.cloud.mdb.mysql.v1\x1a\x1cgoogle/api/annotations.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1bgoogle/type/timeofday.proto\x1a yandex/cloud/api/operation.proto\x1a&yandex/cloud/operation/operation.proto\x1a\x1dyandex/cloud/validation.proto\x1a&yandex/cloud/mdb/mysql/v1/backup.proto\x1a\'yandex/cloud/mdb/mysql/v1/cluster.proto\x1a(yandex/cloud/mdb/mysql/v1/database.proto\x1a$yandex/cloud/mdb/mysql/v1/user.proto\x1a/yandex/cloud/mdb/mysql/v1/config/mysql5_7.proto\x1a/yandex/cloud/mdb/mysql/v1/config/mysql8_0.proto\x1a+yandex/cloud/mdb/mysql/v1/maintenance.proto\"5\n\x11GetClusterRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\"\x90\x01\n\x13ListClustersRequest\x12\x1f\n\tfolder_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12\x1d\n\tpage_size\x18\x02 \x01(\x03\x42\n\xfa\xc7\x31\x06\x30-1000\x12\x1d\n\npage_token\x18\x03 \x01(\tB\t\x8a\xc8\x31\x05<=100\x12\x1a\n\x06\x66ilter\x18\x04 \x01(\tB\n\x8a\xc8\x31\x06<=1000\"e\n\x14ListClustersResponse\x12\x34\n\x08\x63lusters\x18\x01 \x03(\x0b\x32\".yandex.cloud.mdb.mysql.v1.Cluster\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"\xce\x05\n\x14\x43reateClusterRequest\x12\x1f\n\tfolder_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12,\n\x04name\x18\x02 \x01(\tB\x1e\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=63\xf2\xc7\x31\x0e[a-zA-Z0-9_-]*\x12\x1e\n\x0b\x64\x65scription\x18\x03 \x01(\tB\t\x8a\xc8\x31\x05<=256\x12\x8c\x01\n\x06labels\x18\x04 \x03(\x0b\x32;.yandex.cloud.mdb.mysql.v1.CreateClusterRequest.LabelsEntryB?\x82\xc8\x31\x04<=64\x8a\xc8\x31\x04<=63\xf2\xc7\x31\x0b[-_0-9a-z]*\xb2\xc8\x31\x06\x1a\x04\x31-63\xb2\xc8\x31\x12\x12\x10[a-z][-_0-9a-z]*\x12\x43\n\x0b\x65nvironment\x18\x05 \x01(\x0e\x32..yandex.cloud.mdb.mysql.v1.Cluster.Environment\x12:\n\x0b\x63onfig_spec\x18\x06 
\x01(\x0b\x32%.yandex.cloud.mdb.mysql.v1.ConfigSpec\x12?\n\x0e\x64\x61tabase_specs\x18\x07 \x03(\x0b\x32\'.yandex.cloud.mdb.mysql.v1.DatabaseSpec\x12\x37\n\nuser_specs\x18\x08 \x03(\x0b\x32#.yandex.cloud.mdb.mysql.v1.UserSpec\x12\x37\n\nhost_specs\x18\t \x03(\x0b\x32#.yandex.cloud.mdb.mysql.v1.HostSpec\x12\x1c\n\nnetwork_id\x18\n \x01(\tB\x08\x8a\xc8\x31\x04<=50\x12\x1a\n\x12security_group_ids\x18\x0b \x03(\t\x12\x1b\n\x13\x64\x65letion_protection\x18\x0c \x01(\x08\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"+\n\x15\x43reateClusterMetadata\x12\x12\n\ncluster_id\x18\x01 \x01(\t\"\xb0\x04\n\x14UpdateClusterRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x1e\n\x0b\x64\x65scription\x18\x03 \x01(\tB\t\x8a\xc8\x31\x05<=256\x12\x8c\x01\n\x06labels\x18\x04 \x03(\x0b\x32;.yandex.cloud.mdb.mysql.v1.UpdateClusterRequest.LabelsEntryB?\x82\xc8\x31\x04<=64\x8a\xc8\x31\x04<=63\xf2\xc7\x31\x0b[-_0-9a-z]*\xb2\xc8\x31\x06\x1a\x04\x31-63\xb2\xc8\x31\x12\x12\x10[a-z][-_0-9a-z]*\x12:\n\x0b\x63onfig_spec\x18\x05 \x01(\x0b\x32%.yandex.cloud.mdb.mysql.v1.ConfigSpec\x12(\n\x04name\x18\x06 \x01(\tB\x1a\x8a\xc8\x31\x04<=63\xf2\xc7\x31\x0e[a-zA-Z0-9_-]*\x12H\n\x12maintenance_window\x18\x07 \x01(\x0b\x32,.yandex.cloud.mdb.mysql.v1.MaintenanceWindow\x12\x1a\n\x12security_group_ids\x18\x08 \x03(\t\x12\x1b\n\x13\x64\x65letion_protection\x18\t \x01(\x08\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"+\n\x15UpdateClusterMetadata\x12\x12\n\ncluster_id\x18\x01 \x01(\t\"8\n\x14\x44\x65leteClusterRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\"+\n\x15\x44\x65leteClusterMetadata\x12\x12\n\ncluster_id\x18\x01 \x01(\t\"8\n\x14\x42\x61\x63kupClusterRequest\x12 \n\ncluster_id\x18\x01 
\x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\"+\n\x15\x42\x61\x63kupClusterMetadata\x12\x12\n\ncluster_id\x18\x01 \x01(\t\"\xf6\x04\n\x15RestoreClusterRequest\x12\x17\n\tbackup_id\x18\x01 \x01(\tB\x04\xe8\xc7\x31\x01\x12.\n\x04time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x04\xe8\xc7\x31\x01\x12$\n\x04name\x18\x04 \x01(\tB\x16\xe8\xc7\x31\x01\xf2\xc7\x31\x0e[a-zA-Z0-9_-]*\x12\x1e\n\x0b\x64\x65scription\x18\x05 \x01(\tB\t\x8a\xc8\x31\x05<=256\x12\x8d\x01\n\x06labels\x18\x06 \x03(\x0b\x32<.yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.LabelsEntryB?\x82\xc8\x31\x04<=64\x8a\xc8\x31\x04<=63\xf2\xc7\x31\x0b[-_0-9a-z]*\xb2\xc8\x31\x06\x1a\x04\x31-63\xb2\xc8\x31\x12\x12\x10[a-z][-_0-9a-z]*\x12\x43\n\x0b\x65nvironment\x18\x07 \x01(\x0e\x32..yandex.cloud.mdb.mysql.v1.Cluster.Environment\x12:\n\x0b\x63onfig_spec\x18\x08 \x01(\x0b\x32%.yandex.cloud.mdb.mysql.v1.ConfigSpec\x12\x37\n\nhost_specs\x18\t \x03(\x0b\x32#.yandex.cloud.mdb.mysql.v1.HostSpec\x12\x1c\n\nnetwork_id\x18\n \x01(\tB\x08\x8a\xc8\x31\x04<=50\x12\x1b\n\tfolder_id\x18\x0b \x01(\tB\x08\x8a\xc8\x31\x04<=50\x12\x1a\n\x12security_group_ids\x18\x0c \x03(\t\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"?\n\x16RestoreClusterMetadata\x12\x12\n\ncluster_id\x18\x01 \x01(\t\x12\x11\n\tbackup_id\x18\x02 \x01(\t\"]\n\x1bStartClusterFailoverRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12\x1c\n\thost_name\x18\x02 \x01(\tB\t\x8a\xc8\x31\x05<=253\"2\n\x1cStartClusterFailoverMetadata\x12\x12\n\ncluster_id\x18\x01 \x01(\t\"\xca\x02\n\x1cRescheduleMaintenanceRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12\x65\n\x0freschedule_type\x18\x02 \x01(\x0e\x32\x46.yandex.cloud.mdb.mysql.v1.RescheduleMaintenanceRequest.RescheduleTypeB\x04\xe8\xc7\x31\x01\x12\x31\n\rdelayed_until\x18\x03 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\"n\n\x0eRescheduleType\x12\x1f\n\x1bRESCHEDULE_TYPE_UNSPECIFIED\x10\x00\x12\r\n\tIMMEDIATE\x10\x01\x12\x19\n\x15NEXT_AVAILABLE_WINDOW\x10\x02\x12\x11\n\rSPECIFIC_TIME\x10\x03\"f\n\x1dRescheduleMaintenanceMetadata\x12\x12\n\ncluster_id\x18\x01 \x01(\t\x12\x31\n\rdelayed_until\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xae\x01\n\tLogRecord\x12-\n\ttimestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x42\n\x07message\x18\x02 \x03(\x0b\x32\x31.yandex.cloud.mdb.mysql.v1.LogRecord.MessageEntry\x1a.\n\x0cMessageEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xd8\x03\n\x16ListClusterLogsRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12\x15\n\rcolumn_filter\x18\x02 \x03(\t\x12S\n\x0cservice_type\x18\x03 \x01(\x0e\x32=.yandex.cloud.mdb.mysql.v1.ListClusterLogsRequest.ServiceType\x12-\n\tfrom_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x07to_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1d\n\tpage_size\x18\x06 \x01(\x03\x42\n\xfa\xc7\x31\x06\x30-1000\x12\x1d\n\npage_token\x18\x07 \x01(\tB\t\x8a\xc8\x31\x05<=100\x12\x1e\n\x16\x61lways_next_page_token\x18\x08 \x01(\x08\"v\n\x0bServiceType\x12\x1c\n\x18SERVICE_TYPE_UNSPECIFIED\x10\x00\x12\x0f\n\x0bMYSQL_ERROR\x10\x01\x12\x11\n\rMYSQL_GENERAL\x10\x02\x12\x14\n\x10MYSQL_SLOW_QUERY\x10\x03\x12\x0f\n\x0bMYSQL_AUDIT\x10\x04\"f\n\x17ListClusterLogsResponse\x12\x32\n\x04logs\x18\x01 \x03(\x0b\x32$.yandex.cloud.mdb.mysql.v1.LogRecord\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"b\n\x0fStreamLogRecord\x12\x34\n\x06record\x18\x01 \x01(\x0b\x32$.yandex.cloud.mdb.mysql.v1.LogRecord\x12\x19\n\x11next_record_token\x18\x02 \x01(\t\"\xbb\x03\n\x18StreamClusterLogsRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12\x15\n\rcolumn_filter\x18\x02 \x03(\t\x12U\n\x0cservice_type\x18\x03 
\x01(\x0e\x32?.yandex.cloud.mdb.mysql.v1.StreamClusterLogsRequest.ServiceType\x12-\n\tfrom_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x07to_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1f\n\x0crecord_token\x18\x06 \x01(\tB\t\x8a\xc8\x31\x05<=100\x12\x1a\n\x06\x66ilter\x18\x07 \x01(\tB\n\x8a\xc8\x31\x06<=1000\"v\n\x0bServiceType\x12\x1c\n\x18SERVICE_TYPE_UNSPECIFIED\x10\x00\x12\x0f\n\x0bMYSQL_ERROR\x10\x01\x12\x11\n\rMYSQL_GENERAL\x10\x02\x12\x14\n\x10MYSQL_SLOW_QUERY\x10\x03\x12\x0f\n\x0bMYSQL_AUDIT\x10\x04\"~\n\x1cListClusterOperationsRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12\x1d\n\tpage_size\x18\x02 \x01(\x03\x42\n\xfa\xc7\x31\x06\x30-1000\x12\x1d\n\npage_token\x18\x03 \x01(\tB\t\x8a\xc8\x31\x05<=100\"o\n\x1dListClusterOperationsResponse\x12\x35\n\noperations\x18\x01 \x03(\x0b\x32!.yandex.cloud.operation.Operation\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"{\n\x19ListClusterBackupsRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12\x1d\n\tpage_size\x18\x02 \x01(\x03\x42\n\xfa\xc7\x31\x06<=1000\x12\x1d\n\npage_token\x18\x03 \x01(\tB\t\x8a\xc8\x31\x05<=100\"i\n\x1aListClusterBackupsResponse\x12\x32\n\x07\x62\x61\x63kups\x18\x01 \x03(\x0b\x32!.yandex.cloud.mdb.mysql.v1.Backup\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"y\n\x17ListClusterHostsRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12\x1d\n\tpage_size\x18\x02 \x01(\x03\x42\n\xfa\xc7\x31\x06\x30-1000\x12\x1d\n\npage_token\x18\x03 \x01(\tB\t\x8a\xc8\x31\x05<=100\"c\n\x18ListClusterHostsResponse\x12.\n\x05hosts\x18\x01 \x03(\x0b\x32\x1f.yandex.cloud.mdb.mysql.v1.Host\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"{\n\x16\x41\x64\x64\x43lusterHostsRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12?\n\nhost_specs\x18\x02 
\x03(\x0b\x32#.yandex.cloud.mdb.mysql.v1.HostSpecB\x06\x82\xc8\x31\x02>0\"A\n\x17\x41\x64\x64\x43lusterHostsMetadata\x12\x12\n\ncluster_id\x18\x01 \x01(\t\x12\x12\n\nhost_names\x18\x02 \x03(\t\"b\n\x19\x44\x65leteClusterHostsRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12#\n\nhost_names\x18\x02 \x03(\tB\x0f\x82\xc8\x31\x02>0\x8a\xc8\x31\x05<=253\"D\n\x1a\x44\x65leteClusterHostsMetadata\x12\x12\n\ncluster_id\x18\x01 \x01(\t\x12\x12\n\nhost_names\x18\x02 \x03(\t\"7\n\x13StartClusterRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\"*\n\x14StartClusterMetadata\x12\x12\n\ncluster_id\x18\x01 \x01(\t\"6\n\x12StopClusterRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\")\n\x13StopClusterMetadata\x12\x12\n\ncluster_id\x18\x01 \x01(\t\"c\n\x12MoveClusterRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12+\n\x15\x64\x65stination_folder_id\x18\x02 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\"b\n\x13MoveClusterMetadata\x12\x12\n\ncluster_id\x18\x01 \x01(\t\x12\x18\n\x10source_folder_id\x18\x02 \x01(\t\x12\x1d\n\x15\x64\x65stination_folder_id\x18\x03 \x01(\t\"\x8b\x01\n\x19UpdateClusterHostsRequest\x12 \n\ncluster_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12L\n\x11update_host_specs\x18\x02 \x03(\x0b\x32).yandex.cloud.mdb.mysql.v1.UpdateHostSpecB\x06\x82\xc8\x31\x02>0\"D\n\x1aUpdateClusterHostsMetadata\x12\x12\n\ncluster_id\x18\x01 \x01(\t\x12\x12\n\nhost_names\x18\x02 \x03(\t\"\xd1\x01\n\x0eUpdateHostSpec\x12\x17\n\thost_name\x18\x01 \x01(\tB\x04\xe8\xc7\x31\x01\x12\x1a\n\x12replication_source\x18\x02 \x01(\t\x12/\n\x0bupdate_mask\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\"\n\x0f\x62\x61\x63kup_priority\x18\x04 \x01(\x03\x42\t\xfa\xc7\x31\x05\x30-100\x12\x18\n\x10\x61ssign_public_ip\x18\x05 \x01(\x08\x12\x1b\n\x08priority\x18\x06 
\x01(\x03\x42\t\xfa\xc7\x31\x05\x30-100\"\xb9\x01\n\x08HostSpec\x12\x19\n\x07zone_id\x18\x01 \x01(\tB\x08\x8a\xc8\x31\x04<=50\x12\x1b\n\tsubnet_id\x18\x02 \x01(\tB\x08\x8a\xc8\x31\x04<=50\x12\x18\n\x10\x61ssign_public_ip\x18\x03 \x01(\x08\x12\x1a\n\x12replication_source\x18\x04 \x01(\t\x12\"\n\x0f\x62\x61\x63kup_priority\x18\x05 \x01(\x03\x42\t\xfa\xc7\x31\x05\x30-100\x12\x1b\n\x08priority\x18\x06 \x01(\x03\x42\t\xfa\xc7\x31\x05\x30-100\"\xe0\x03\n\nConfigSpec\x12\x0f\n\x07version\x18\x01 \x01(\t\x12]\n\x10mysql_config_5_7\x18\x02 \x01(\x0b\x32\x30.yandex.cloud.mdb.mysql.v1.config.MysqlConfig5_7H\x00R\x0fmysqlConfig_5_7\x12]\n\x10mysql_config_8_0\x18\x06 \x01(\x0b\x32\x30.yandex.cloud.mdb.mysql.v1.config.MysqlConfig8_0H\x00R\x0fmysqlConfig_8_0\x12\x37\n\tresources\x18\x03 \x01(\x0b\x32$.yandex.cloud.mdb.mysql.v1.Resources\x12\x33\n\x13\x62\x61\x63kup_window_start\x18\x04 \x01(\x0b\x32\x16.google.type.TimeOfDay\x12\x31\n\x06\x61\x63\x63\x65ss\x18\x05 \x01(\x0b\x32!.yandex.cloud.mdb.mysql.v1.Access\x12R\n\x17performance_diagnostics\x18\x07 \x01(\x0b\x32\x31.yandex.cloud.mdb.mysql.v1.PerformanceDiagnosticsB\x0e\n\x0cmysql_config2\xca\x1d\n\x0e\x43lusterService\x12\x88\x01\n\x03Get\x12,.yandex.cloud.mdb.mysql.v1.GetClusterRequest\x1a\".yandex.cloud.mdb.mysql.v1.Cluster\"/\x82\xd3\xe4\x93\x02)\x12\'/managed-mysql/v1/clusters/{cluster_id}\x12\x8b\x01\n\x04List\x12..yandex.cloud.mdb.mysql.v1.ListClustersRequest\x1a/.yandex.cloud.mdb.mysql.v1.ListClustersResponse\"\"\x82\xd3\xe4\x93\x02\x1c\x12\x1a/managed-mysql/v1/clusters\x12\xa7\x01\n\x06\x43reate\x12/.yandex.cloud.mdb.mysql.v1.CreateClusterRequest\x1a!.yandex.cloud.operation.Operation\"I\x82\xd3\xe4\x93\x02\x1f\"\x1a/managed-mysql/v1/clusters:\x01*\xb2\xd2* \n\x15\x43reateClusterMetadata\x12\x07\x43luster\x12\xb4\x01\n\x06Update\x12/.yandex.cloud.mdb.mysql.v1.UpdateClusterRequest\x1a!.yandex.cloud.operation.Operation\"V\x82\xd3\xe4\x93\x02,2\'/managed-mysql/v1/clusters/{cluster_id}:\x01*\xb2\xd2* 
\n\x15UpdateClusterMetadata\x12\x07\x43luster\x12\xbf\x01\n\x06\x44\x65lete\x12/.yandex.cloud.mdb.mysql.v1.DeleteClusterRequest\x1a!.yandex.cloud.operation.Operation\"a\x82\xd3\xe4\x93\x02)*\'/managed-mysql/v1/clusters/{cluster_id}\xb2\xd2*.\n\x15\x44\x65leteClusterMetadata\x12\x15google.protobuf.Empty\x12\xb4\x01\n\x05Start\x12..yandex.cloud.mdb.mysql.v1.StartClusterRequest\x1a!.yandex.cloud.operation.Operation\"X\x82\xd3\xe4\x93\x02/\"-/managed-mysql/v1/clusters/{cluster_id}:start\xb2\xd2*\x1f\n\x14StartClusterMetadata\x12\x07\x43luster\x12\xb0\x01\n\x04Stop\x12-.yandex.cloud.mdb.mysql.v1.StopClusterRequest\x1a!.yandex.cloud.operation.Operation\"V\x82\xd3\xe4\x93\x02.\",/managed-mysql/v1/clusters/{cluster_id}:stop\xb2\xd2*\x1e\n\x13StopClusterMetadata\x12\x07\x43luster\x12\xb3\x01\n\x04Move\x12-.yandex.cloud.mdb.mysql.v1.MoveClusterRequest\x1a!.yandex.cloud.operation.Operation\"Y\x82\xd3\xe4\x93\x02\x31\",/managed-mysql/v1/clusters/{cluster_id}:move:\x01*\xb2\xd2*\x1e\n\x13MoveClusterMetadata\x12\x07\x43luster\x12\xb8\x01\n\x06\x42\x61\x63kup\x12/.yandex.cloud.mdb.mysql.v1.BackupClusterRequest\x1a!.yandex.cloud.operation.Operation\"Z\x82\xd3\xe4\x93\x02\x30\"./managed-mysql/v1/clusters/{cluster_id}:backup\xb2\xd2* 
\n\x15\x42\x61\x63kupClusterMetadata\x12\x07\x43luster\x12\xb2\x01\n\x07Restore\x12\x30.yandex.cloud.mdb.mysql.v1.RestoreClusterRequest\x1a!.yandex.cloud.operation.Operation\"R\x82\xd3\xe4\x93\x02\'\"\"/managed-mysql/v1/clusters:restore:\x01*\xb2\xd2*!\n\x16RestoreClusterMetadata\x12\x07\x43luster\x12\xe9\x01\n\x15RescheduleMaintenance\x12\x37.yandex.cloud.mdb.mysql.v1.RescheduleMaintenanceRequest\x1a!.yandex.cloud.operation.Operation\"t\x82\xd3\xe4\x93\x02\x42\"=/managed-mysql/v1/clusters/{cluster_id}:rescheduleMaintenance:\x01*\xb2\xd2*(\n\x1dRescheduleMaintenanceMetadata\x12\x07\x43luster\x12\xd7\x01\n\rStartFailover\x12\x36.yandex.cloud.mdb.mysql.v1.StartClusterFailoverRequest\x1a!.yandex.cloud.operation.Operation\"k\x82\xd3\xe4\x93\x02:\"5/managed-mysql/v1/clusters/{cluster_id}:startFailover:\x01*\xb2\xd2*\'\n\x1cStartClusterFailoverMetadata\x12\x07\x43luster\x12\xa7\x01\n\x08ListLogs\x12\x31.yandex.cloud.mdb.mysql.v1.ListClusterLogsRequest\x1a\x32.yandex.cloud.mdb.mysql.v1.ListClusterLogsResponse\"4\x82\xd3\xe4\x93\x02.\x12,/managed-mysql/v1/clusters/{cluster_id}:logs\x12\xac\x01\n\nStreamLogs\x12\x33.yandex.cloud.mdb.mysql.v1.StreamClusterLogsRequest\x1a*.yandex.cloud.mdb.mysql.v1.StreamLogRecord\";\x82\xd3\xe4\x93\x02\x35\x12\x33/managed-mysql/v1/clusters/{cluster_id}:stream_logs0\x01\x12\xbf\x01\n\x0eListOperations\x12\x37.yandex.cloud.mdb.mysql.v1.ListClusterOperationsRequest\x1a\x38.yandex.cloud.mdb.mysql.v1.ListClusterOperationsResponse\":\x82\xd3\xe4\x93\x02\x34\x12\x32/managed-mysql/v1/clusters/{cluster_id}/operations\x12\xb3\x01\n\x0bListBackups\x12\x34.yandex.cloud.mdb.mysql.v1.ListClusterBackupsRequest\x1a\x35.yandex.cloud.mdb.mysql.v1.ListClusterBackupsResponse\"7\x82\xd3\xe4\x93\x02\x31\x12//managed-mysql/v1/clusters/{cluster_id}/backups\x12\xab\x01\n\tListHosts\x12\x32.yandex.cloud.mdb.mysql.v1.ListClusterHostsRequest\x1a\x33.yandex.cloud.mdb.mysql.v1.ListClusterHostsResponse\"5\x82\xd3\xe4\x93\x02/\x12-/managed-mysql/v1/clusters/{cluster_id}/hos
ts\x12\xda\x01\n\x08\x41\x64\x64Hosts\x12\x31.yandex.cloud.mdb.mysql.v1.AddClusterHostsRequest\x1a!.yandex.cloud.operation.Operation\"x\x82\xd3\xe4\x93\x02>\"9/managed-mysql/v1/clusters/{cluster_id}/hosts:batchCreate:\x01*\xb2\xd2*0\n\x17\x41\x64\x64\x43lusterHostsMetadata\x12\x15google.protobuf.Empty\x12\xe3\x01\n\x0bUpdateHosts\x12\x34.yandex.cloud.mdb.mysql.v1.UpdateClusterHostsRequest\x1a!.yandex.cloud.operation.Operation\"{\x82\xd3\xe4\x93\x02>\"9/managed-mysql/v1/clusters/{cluster_id}/hosts:batchUpdate:\x01*\xb2\xd2*3\n\x1aUpdateClusterHostsMetadata\x12\x15google.protobuf.Empty\x12\xe3\x01\n\x0b\x44\x65leteHosts\x12\x34.yandex.cloud.mdb.mysql.v1.DeleteClusterHostsRequest\x1a!.yandex.cloud.operation.Operation\"{\x82\xd3\xe4\x93\x02>\"9/managed-mysql/v1/clusters/{cluster_id}/hosts:batchDelete:\x01*\xb2\xd2*3\n\x1a\x44\x65leteClusterHostsMetadata\x12\x15google.protobuf.EmptyBd\n\x1dyandex.cloud.api.mdb.mysql.v1ZCgithub.com/yandex-cloud/go-genproto/yandex/cloud/mdb/mysql/v1;mysqlb\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,google_dot_type_dot_timeofday__pb2.DESCRIPTOR,yandex_dot_cloud_dot_api_dot_operation__pb2.DESCRIPTOR,yandex_dot_cloud_dot_operation_dot_operation__pb2.DESCRIPTOR,yandex_dot_cloud_dot_validation__pb2.DESCRIPTOR,yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_backup__pb2.DESCRIPTOR,yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__pb2.DESCRIPTOR,yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_database__pb2.DESCRIPTOR,yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_user__pb2.DESCRIPTOR,yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_config_dot_mysql5__7__pb2.DESCRIPTOR,yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_config_dot_mysql8__0__pb2.DESCRIPTOR,yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_maintenance__pb2.DESCRIPTOR,])
_RESCHEDULEMAINTENANCEREQUEST_RESCHEDULETYPE = _descriptor.EnumDescriptor(
name='RescheduleType',
full_name='yandex.cloud.mdb.mysql.v1.RescheduleMaintenanceRequest.RescheduleType',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='RESCHEDULE_TYPE_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='IMMEDIATE', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='NEXT_AVAILABLE_WINDOW', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='SPECIFIC_TIME', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=3564,
serialized_end=3674,
)
_sym_db.RegisterEnumDescriptor(_RESCHEDULEMAINTENANCEREQUEST_RESCHEDULETYPE)
_LISTCLUSTERLOGSREQUEST_SERVICETYPE = _descriptor.EnumDescriptor(
name='ServiceType',
full_name='yandex.cloud.mdb.mysql.v1.ListClusterLogsRequest.ServiceType',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='SERVICE_TYPE_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MYSQL_ERROR', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MYSQL_GENERAL', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MYSQL_SLOW_QUERY', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MYSQL_AUDIT', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=4312,
serialized_end=4430,
)
_sym_db.RegisterEnumDescriptor(_LISTCLUSTERLOGSREQUEST_SERVICETYPE)
_STREAMCLUSTERLOGSREQUEST_SERVICETYPE = _descriptor.EnumDescriptor(
name='ServiceType',
full_name='yandex.cloud.mdb.mysql.v1.StreamClusterLogsRequest.ServiceType',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='SERVICE_TYPE_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MYSQL_ERROR', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MYSQL_GENERAL', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MYSQL_SLOW_QUERY', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MYSQL_AUDIT', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=4312,
serialized_end=4430,
)
_sym_db.RegisterEnumDescriptor(_STREAMCLUSTERLOGSREQUEST_SERVICETYPE)
_GETCLUSTERREQUEST = _descriptor.Descriptor(
name='GetClusterRequest',
full_name='yandex.cloud.mdb.mysql.v1.GetClusterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.GetClusterRequest.cluster_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=613,
serialized_end=666,
)
_LISTCLUSTERSREQUEST = _descriptor.Descriptor(
name='ListClustersRequest',
full_name='yandex.cloud.mdb.mysql.v1.ListClustersRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='folder_id', full_name='yandex.cloud.mdb.mysql.v1.ListClustersRequest.folder_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='page_size', full_name='yandex.cloud.mdb.mysql.v1.ListClustersRequest.page_size', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\0060-1000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='page_token', full_name='yandex.cloud.mdb.mysql.v1.ListClustersRequest.page_token', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\212\3101\005<=100', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='filter', full_name='yandex.cloud.mdb.mysql.v1.ListClustersRequest.filter', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\212\3101\006<=1000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=669,
serialized_end=813,
)
_LISTCLUSTERSRESPONSE = _descriptor.Descriptor(
name='ListClustersResponse',
full_name='yandex.cloud.mdb.mysql.v1.ListClustersResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='clusters', full_name='yandex.cloud.mdb.mysql.v1.ListClustersResponse.clusters', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='next_page_token', full_name='yandex.cloud.mdb.mysql.v1.ListClustersResponse.next_page_token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=815,
serialized_end=916,
)
_CREATECLUSTERREQUEST_LABELSENTRY = _descriptor.Descriptor(
name='LabelsEntry',
full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.LabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.LabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.LabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1592,
serialized_end=1637,
)
_CREATECLUSTERREQUEST = _descriptor.Descriptor(
name='CreateClusterRequest',
full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='folder_id', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.folder_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001\212\3101\004<=63\362\3071\016[a-zA-Z0-9_-]*', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='description', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.description', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\212\3101\005<=256', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='labels', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.labels', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\202\3101\004<=64\212\3101\004<=63\362\3071\013[-_0-9a-z]*\262\3101\006\032\0041-63\262\3101\022\022\020[a-z][-_0-9a-z]*', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='environment', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.environment', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='config_spec', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.config_spec', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='database_specs', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.database_specs', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='user_specs', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.user_specs', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='host_specs', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.host_specs', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='network_id', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.network_id', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='security_group_ids', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.security_group_ids', index=10,
number=11, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='deletion_protection', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterRequest.deletion_protection', index=11,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_CREATECLUSTERREQUEST_LABELSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=919,
serialized_end=1637,
)
_CREATECLUSTERMETADATA = _descriptor.Descriptor(
name='CreateClusterMetadata',
full_name='yandex.cloud.mdb.mysql.v1.CreateClusterMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.CreateClusterMetadata.cluster_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1639,
serialized_end=1682,
)
_UPDATECLUSTERREQUEST_LABELSENTRY = _descriptor.Descriptor(
name='LabelsEntry',
full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterRequest.LabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterRequest.LabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterRequest.LabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1592,
serialized_end=1637,
)
_UPDATECLUSTERREQUEST = _descriptor.Descriptor(
name='UpdateClusterRequest',
full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterRequest.cluster_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='update_mask', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterRequest.update_mask', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='description', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterRequest.description', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\212\3101\005<=256', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='labels', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterRequest.labels', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\202\3101\004<=64\212\3101\004<=63\362\3071\013[-_0-9a-z]*\262\3101\006\032\0041-63\262\3101\022\022\020[a-z][-_0-9a-z]*', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='config_spec', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterRequest.config_spec', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterRequest.name', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\212\3101\004<=63\362\3071\016[a-zA-Z0-9_-]*', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='maintenance_window', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterRequest.maintenance_window', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='security_group_ids', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterRequest.security_group_ids', index=7,
number=8, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='deletion_protection', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterRequest.deletion_protection', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_UPDATECLUSTERREQUEST_LABELSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1685,
serialized_end=2245,
)
_UPDATECLUSTERMETADATA = _descriptor.Descriptor(
name='UpdateClusterMetadata',
full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterMetadata.cluster_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2247,
serialized_end=2290,
)
_DELETECLUSTERREQUEST = _descriptor.Descriptor(
name='DeleteClusterRequest',
full_name='yandex.cloud.mdb.mysql.v1.DeleteClusterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.DeleteClusterRequest.cluster_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2292,
serialized_end=2348,
)
_DELETECLUSTERMETADATA = _descriptor.Descriptor(
name='DeleteClusterMetadata',
full_name='yandex.cloud.mdb.mysql.v1.DeleteClusterMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.DeleteClusterMetadata.cluster_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2350,
serialized_end=2393,
)
_BACKUPCLUSTERREQUEST = _descriptor.Descriptor(
name='BackupClusterRequest',
full_name='yandex.cloud.mdb.mysql.v1.BackupClusterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.BackupClusterRequest.cluster_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2395,
serialized_end=2451,
)
_BACKUPCLUSTERMETADATA = _descriptor.Descriptor(
name='BackupClusterMetadata',
full_name='yandex.cloud.mdb.mysql.v1.BackupClusterMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.BackupClusterMetadata.cluster_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2453,
serialized_end=2496,
)
_RESTORECLUSTERREQUEST_LABELSENTRY = _descriptor.Descriptor(
name='LabelsEntry',
full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.LabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.LabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.LabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1592,
serialized_end=1637,
)
_RESTORECLUSTERREQUEST = _descriptor.Descriptor(
name='RestoreClusterRequest',
full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='backup_id', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.backup_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='time', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.time', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.name', index=2,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001\362\3071\016[a-zA-Z0-9_-]*', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='description', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.description', index=3,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\212\3101\005<=256', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='labels', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.labels', index=4,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\202\3101\004<=64\212\3101\004<=63\362\3071\013[-_0-9a-z]*\262\3101\006\032\0041-63\262\3101\022\022\020[a-z][-_0-9a-z]*', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='environment', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.environment', index=5,
number=7, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='config_spec', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.config_spec', index=6,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='host_specs', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.host_specs', index=7,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='network_id', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.network_id', index=8,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='folder_id', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.folder_id', index=9,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='security_group_ids', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.security_group_ids', index=10,
number=12, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_RESTORECLUSTERREQUEST_LABELSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2499,
serialized_end=3129,
)
_RESTORECLUSTERMETADATA = _descriptor.Descriptor(
name='RestoreClusterMetadata',
full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterMetadata.cluster_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='backup_id', full_name='yandex.cloud.mdb.mysql.v1.RestoreClusterMetadata.backup_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3131,
serialized_end=3194,
)
_STARTCLUSTERFAILOVERREQUEST = _descriptor.Descriptor(
name='StartClusterFailoverRequest',
full_name='yandex.cloud.mdb.mysql.v1.StartClusterFailoverRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.StartClusterFailoverRequest.cluster_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='host_name', full_name='yandex.cloud.mdb.mysql.v1.StartClusterFailoverRequest.host_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\212\3101\005<=253', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3196,
serialized_end=3289,
)
_STARTCLUSTERFAILOVERMETADATA = _descriptor.Descriptor(
name='StartClusterFailoverMetadata',
full_name='yandex.cloud.mdb.mysql.v1.StartClusterFailoverMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.StartClusterFailoverMetadata.cluster_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3291,
serialized_end=3341,
)
_RESCHEDULEMAINTENANCEREQUEST = _descriptor.Descriptor(
name='RescheduleMaintenanceRequest',
full_name='yandex.cloud.mdb.mysql.v1.RescheduleMaintenanceRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.RescheduleMaintenanceRequest.cluster_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='reschedule_type', full_name='yandex.cloud.mdb.mysql.v1.RescheduleMaintenanceRequest.reschedule_type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='delayed_until', full_name='yandex.cloud.mdb.mysql.v1.RescheduleMaintenanceRequest.delayed_until', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_RESCHEDULEMAINTENANCEREQUEST_RESCHEDULETYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3344,
serialized_end=3674,
)
_RESCHEDULEMAINTENANCEMETADATA = _descriptor.Descriptor(
name='RescheduleMaintenanceMetadata',
full_name='yandex.cloud.mdb.mysql.v1.RescheduleMaintenanceMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.RescheduleMaintenanceMetadata.cluster_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='delayed_until', full_name='yandex.cloud.mdb.mysql.v1.RescheduleMaintenanceMetadata.delayed_until', index=1,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3676,
serialized_end=3778,
)
_LOGRECORD_MESSAGEENTRY = _descriptor.Descriptor(
name='MessageEntry',
full_name='yandex.cloud.mdb.mysql.v1.LogRecord.MessageEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='yandex.cloud.mdb.mysql.v1.LogRecord.MessageEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='yandex.cloud.mdb.mysql.v1.LogRecord.MessageEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3909,
serialized_end=3955,
)
_LOGRECORD = _descriptor.Descriptor(
name='LogRecord',
full_name='yandex.cloud.mdb.mysql.v1.LogRecord',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='timestamp', full_name='yandex.cloud.mdb.mysql.v1.LogRecord.timestamp', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='message', full_name='yandex.cloud.mdb.mysql.v1.LogRecord.message', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_LOGRECORD_MESSAGEENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3781,
serialized_end=3955,
)
_LISTCLUSTERLOGSREQUEST = _descriptor.Descriptor(
name='ListClusterLogsRequest',
full_name='yandex.cloud.mdb.mysql.v1.ListClusterLogsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.ListClusterLogsRequest.cluster_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='column_filter', full_name='yandex.cloud.mdb.mysql.v1.ListClusterLogsRequest.column_filter', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='service_type', full_name='yandex.cloud.mdb.mysql.v1.ListClusterLogsRequest.service_type', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='from_time', full_name='yandex.cloud.mdb.mysql.v1.ListClusterLogsRequest.from_time', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='to_time', full_name='yandex.cloud.mdb.mysql.v1.ListClusterLogsRequest.to_time', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='page_size', full_name='yandex.cloud.mdb.mysql.v1.ListClusterLogsRequest.page_size', index=5,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372\3071\0060-1000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='page_token', full_name='yandex.cloud.mdb.mysql.v1.ListClusterLogsRequest.page_token', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\212\3101\005<=100', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='always_next_page_token', full_name='yandex.cloud.mdb.mysql.v1.ListClusterLogsRequest.always_next_page_token', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_LISTCLUSTERLOGSREQUEST_SERVICETYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3958,
serialized_end=4430,
)
# ---------------------------------------------------------------------------
# Machine-generated protobuf message descriptors (legacy protoc codegen style,
# pre-protobuf-3.20) for the yandex.cloud.mdb.mysql.v1 ClusterService API.
#
# NOTE(review): DO NOT EDIT BY HAND. Field numbers, wire-type codes
# (type=9 string, 11 message, 3 int64, 8 bool, 14 enum), the
# `serialized_options` byte strings, and the `serialized_start`/`serialized_end`
# byte offsets all index into the serialized FileDescriptorProto built earlier
# in this module; any manual change must instead be made in the .proto source
# and regenerated with protoc.
#
# The `serialized_options` literals carry yandex.cloud validation annotations;
# the trailing ASCII fragments are human-readable constraints (e.g. `<=50`
# looks like a max-length bound, `0-1000` a numeric range) -- presumably
# enforced server-side; confirm against the yandex.cloud annotation protos.
# ---------------------------------------------------------------------------

# Response page for listing cluster logs: repeated `logs` records plus a
# `next_page_token` cursor.
_LISTCLUSTERLOGSRESPONSE = _descriptor.Descriptor(
  name='ListClusterLogsResponse',
  full_name='yandex.cloud.mdb.mysql.v1.ListClusterLogsResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='logs', full_name='yandex.cloud.mdb.mysql.v1.ListClusterLogsResponse.logs', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='next_page_token', full_name='yandex.cloud.mdb.mysql.v1.ListClusterLogsResponse.next_page_token', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4432,
  serialized_end=4534,
)

# One streamed log record (`record` submessage) together with a
# `next_record_token` resume cursor.
_STREAMLOGRECORD = _descriptor.Descriptor(
  name='StreamLogRecord',
  full_name='yandex.cloud.mdb.mysql.v1.StreamLogRecord',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='record', full_name='yandex.cloud.mdb.mysql.v1.StreamLogRecord.record', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='next_record_token', full_name='yandex.cloud.mdb.mysql.v1.StreamLogRecord.next_record_token', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4536,
  serialized_end=4634,
)

# Request for streaming cluster logs: cluster id, column filter, service-type
# enum (nested `_STREAMCLUSTERLOGSREQUEST_SERVICETYPE`, defined elsewhere in
# this module), from/to timestamps, resume token, and a text filter.
_STREAMCLUSTERLOGSREQUEST = _descriptor.Descriptor(
  name='StreamClusterLogsRequest',
  full_name='yandex.cloud.mdb.mysql.v1.StreamClusterLogsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.StreamClusterLogsRequest.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='column_filter', full_name='yandex.cloud.mdb.mysql.v1.StreamClusterLogsRequest.column_filter', index=1,
      number=2, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='service_type', full_name='yandex.cloud.mdb.mysql.v1.StreamClusterLogsRequest.service_type', index=2,
      number=3, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='from_time', full_name='yandex.cloud.mdb.mysql.v1.StreamClusterLogsRequest.from_time', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='to_time', full_name='yandex.cloud.mdb.mysql.v1.StreamClusterLogsRequest.to_time', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='record_token', full_name='yandex.cloud.mdb.mysql.v1.StreamClusterLogsRequest.record_token', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\212\3101\005<=100', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='filter', full_name='yandex.cloud.mdb.mysql.v1.StreamClusterLogsRequest.filter', index=6,
      number=7, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\212\3101\006<=1000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _STREAMCLUSTERLOGSREQUEST_SERVICETYPE,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4637,
  serialized_end=5080,
)

# Paged request for listing a cluster's operations (cluster_id + page_size /
# page_token).
_LISTCLUSTEROPERATIONSREQUEST = _descriptor.Descriptor(
  name='ListClusterOperationsRequest',
  full_name='yandex.cloud.mdb.mysql.v1.ListClusterOperationsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.ListClusterOperationsRequest.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='page_size', full_name='yandex.cloud.mdb.mysql.v1.ListClusterOperationsRequest.page_size', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\372\3071\0060-1000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='page_token', full_name='yandex.cloud.mdb.mysql.v1.ListClusterOperationsRequest.page_token', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\212\3101\005<=100', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=5082,
  serialized_end=5208,
)

# Response page of operations plus a `next_page_token` cursor.
_LISTCLUSTEROPERATIONSRESPONSE = _descriptor.Descriptor(
  name='ListClusterOperationsResponse',
  full_name='yandex.cloud.mdb.mysql.v1.ListClusterOperationsResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='operations', full_name='yandex.cloud.mdb.mysql.v1.ListClusterOperationsResponse.operations', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='next_page_token', full_name='yandex.cloud.mdb.mysql.v1.ListClusterOperationsResponse.next_page_token', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=5210,
  serialized_end=5321,
)

# Paged request for listing a cluster's backups. Note the page_size option
# ASCII is `<=1000` here, unlike the `0-1000` range used by the other list
# requests -- this mirrors the .proto annotations, not an error to "fix" here.
_LISTCLUSTERBACKUPSREQUEST = _descriptor.Descriptor(
  name='ListClusterBackupsRequest',
  full_name='yandex.cloud.mdb.mysql.v1.ListClusterBackupsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.ListClusterBackupsRequest.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='page_size', full_name='yandex.cloud.mdb.mysql.v1.ListClusterBackupsRequest.page_size', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\372\3071\006<=1000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='page_token', full_name='yandex.cloud.mdb.mysql.v1.ListClusterBackupsRequest.page_token', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\212\3101\005<=100', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=5323,
  serialized_end=5446,
)

# Response page of backups plus a `next_page_token` cursor.
_LISTCLUSTERBACKUPSRESPONSE = _descriptor.Descriptor(
  name='ListClusterBackupsResponse',
  full_name='yandex.cloud.mdb.mysql.v1.ListClusterBackupsResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='backups', full_name='yandex.cloud.mdb.mysql.v1.ListClusterBackupsResponse.backups', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='next_page_token', full_name='yandex.cloud.mdb.mysql.v1.ListClusterBackupsResponse.next_page_token', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=5448,
  serialized_end=5553,
)

# Paged request for listing a cluster's hosts (cluster_id + page_size /
# page_token).
_LISTCLUSTERHOSTSREQUEST = _descriptor.Descriptor(
  name='ListClusterHostsRequest',
  full_name='yandex.cloud.mdb.mysql.v1.ListClusterHostsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.ListClusterHostsRequest.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='page_size', full_name='yandex.cloud.mdb.mysql.v1.ListClusterHostsRequest.page_size', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\372\3071\0060-1000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='page_token', full_name='yandex.cloud.mdb.mysql.v1.ListClusterHostsRequest.page_token', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\212\3101\005<=100', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=5555,
  serialized_end=5676,
)

# Response page of hosts plus a `next_page_token` cursor.
_LISTCLUSTERHOSTSRESPONSE = _descriptor.Descriptor(
  name='ListClusterHostsResponse',
  full_name='yandex.cloud.mdb.mysql.v1.ListClusterHostsResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='hosts', full_name='yandex.cloud.mdb.mysql.v1.ListClusterHostsResponse.hosts', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='next_page_token', full_name='yandex.cloud.mdb.mysql.v1.ListClusterHostsResponse.next_page_token', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=5678,
  serialized_end=5777,
)

# Request to add hosts to a cluster: cluster_id plus a non-empty repeated
# `host_specs` (`>0` visible in the option bytes).
_ADDCLUSTERHOSTSREQUEST = _descriptor.Descriptor(
  name='AddClusterHostsRequest',
  full_name='yandex.cloud.mdb.mysql.v1.AddClusterHostsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.AddClusterHostsRequest.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='host_specs', full_name='yandex.cloud.mdb.mysql.v1.AddClusterHostsRequest.host_specs', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\202\3101\002>0', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=5779,
  serialized_end=5902,
)

# Operation metadata for AddClusterHosts: target cluster and affected host
# names.
_ADDCLUSTERHOSTSMETADATA = _descriptor.Descriptor(
  name='AddClusterHostsMetadata',
  full_name='yandex.cloud.mdb.mysql.v1.AddClusterHostsMetadata',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.AddClusterHostsMetadata.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='host_names', full_name='yandex.cloud.mdb.mysql.v1.AddClusterHostsMetadata.host_names', index=1,
      number=2, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=5904,
  serialized_end=5969,
)

# Request to delete hosts: cluster_id plus a non-empty `host_names` list
# (`>0` and `<=253` visible in the option bytes).
_DELETECLUSTERHOSTSREQUEST = _descriptor.Descriptor(
  name='DeleteClusterHostsRequest',
  full_name='yandex.cloud.mdb.mysql.v1.DeleteClusterHostsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.DeleteClusterHostsRequest.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='host_names', full_name='yandex.cloud.mdb.mysql.v1.DeleteClusterHostsRequest.host_names', index=1,
      number=2, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\202\3101\002>0\212\3101\005<=253', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=5971,
  serialized_end=6069,
)

# Operation metadata for DeleteClusterHosts: target cluster and affected host
# names.
_DELETECLUSTERHOSTSMETADATA = _descriptor.Descriptor(
  name='DeleteClusterHostsMetadata',
  full_name='yandex.cloud.mdb.mysql.v1.DeleteClusterHostsMetadata',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.DeleteClusterHostsMetadata.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='host_names', full_name='yandex.cloud.mdb.mysql.v1.DeleteClusterHostsMetadata.host_names', index=1,
      number=2, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=6071,
  serialized_end=6139,
)

# Request to start a stopped cluster (single required cluster_id field).
_STARTCLUSTERREQUEST = _descriptor.Descriptor(
  name='StartClusterRequest',
  full_name='yandex.cloud.mdb.mysql.v1.StartClusterRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.StartClusterRequest.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=6141,
  serialized_end=6196,
)

# Operation metadata for StartCluster (cluster_id only).
_STARTCLUSTERMETADATA = _descriptor.Descriptor(
  name='StartClusterMetadata',
  full_name='yandex.cloud.mdb.mysql.v1.StartClusterMetadata',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.StartClusterMetadata.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=6198,
  serialized_end=6240,
)

# Request to stop a running cluster (single required cluster_id field).
_STOPCLUSTERREQUEST = _descriptor.Descriptor(
  name='StopClusterRequest',
  full_name='yandex.cloud.mdb.mysql.v1.StopClusterRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.StopClusterRequest.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=6242,
  serialized_end=6296,
)

# Operation metadata for StopCluster (cluster_id only).
_STOPCLUSTERMETADATA = _descriptor.Descriptor(
  name='StopClusterMetadata',
  full_name='yandex.cloud.mdb.mysql.v1.StopClusterMetadata',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.StopClusterMetadata.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=6298,
  serialized_end=6339,
)

# Request to move a cluster to another folder (cluster_id +
# destination_folder_id, both required).
_MOVECLUSTERREQUEST = _descriptor.Descriptor(
  name='MoveClusterRequest',
  full_name='yandex.cloud.mdb.mysql.v1.MoveClusterRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.MoveClusterRequest.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='destination_folder_id', full_name='yandex.cloud.mdb.mysql.v1.MoveClusterRequest.destination_folder_id', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=6341,
  serialized_end=6440,
)

# Operation metadata for MoveCluster: cluster plus source and destination
# folder ids.
_MOVECLUSTERMETADATA = _descriptor.Descriptor(
  name='MoveClusterMetadata',
  full_name='yandex.cloud.mdb.mysql.v1.MoveClusterMetadata',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.MoveClusterMetadata.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='source_folder_id', full_name='yandex.cloud.mdb.mysql.v1.MoveClusterMetadata.source_folder_id', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='destination_folder_id', full_name='yandex.cloud.mdb.mysql.v1.MoveClusterMetadata.destination_folder_id', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=6442,
  serialized_end=6540,
)

# Request to reconfigure existing hosts: cluster_id plus a non-empty repeated
# `update_host_specs` (UpdateHostSpec messages).
_UPDATECLUSTERHOSTSREQUEST = _descriptor.Descriptor(
  name='UpdateClusterHostsRequest',
  full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterHostsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterHostsRequest.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='update_host_specs', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterHostsRequest.update_host_specs', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\202\3101\002>0', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=6543,
  serialized_end=6682,
)

# Operation metadata for UpdateClusterHosts: target cluster and affected host
# names.
_UPDATECLUSTERHOSTSMETADATA = _descriptor.Descriptor(
  name='UpdateClusterHostsMetadata',
  full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterHostsMetadata',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='cluster_id', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterHostsMetadata.cluster_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='host_names', full_name='yandex.cloud.mdb.mysql.v1.UpdateClusterHostsMetadata.host_names', index=1,
      number=2, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=6684,
  serialized_end=6752,
)

# Per-host update spec: required host_name, optional replication_source,
# an update_mask submessage, bounded backup_priority / priority ints
# (`0-100` visible in the option bytes), and assign_public_ip flag.
_UPDATEHOSTSPEC = _descriptor.Descriptor(
  name='UpdateHostSpec',
  full_name='yandex.cloud.mdb.mysql.v1.UpdateHostSpec',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='host_name', full_name='yandex.cloud.mdb.mysql.v1.UpdateHostSpec.host_name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\350\3071\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='replication_source', full_name='yandex.cloud.mdb.mysql.v1.UpdateHostSpec.replication_source', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='update_mask', full_name='yandex.cloud.mdb.mysql.v1.UpdateHostSpec.update_mask', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='backup_priority', full_name='yandex.cloud.mdb.mysql.v1.UpdateHostSpec.backup_priority', index=3,
      number=4, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\372\3071\0050-100', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='assign_public_ip', full_name='yandex.cloud.mdb.mysql.v1.UpdateHostSpec.assign_public_ip', index=4,
      number=5, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='priority', full_name='yandex.cloud.mdb.mysql.v1.UpdateHostSpec.priority', index=5,
      number=6, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\372\3071\0050-100', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=6755,
  serialized_end=6964,
)
# Descriptor for the HostSpec message (yandex.cloud.mdb.mysql.v1.HostSpec):
# the per-host configuration supplied when creating/restoring a cluster or
# adding hosts.
# NOTE(review): protoc-generated code — do not edit by hand. The
# serialized_start/serialized_end values are byte offsets into this file's
# serialized FileDescriptorProto and must stay in sync with the .proto.
_HOSTSPEC = _descriptor.Descriptor(
  name='HostSpec',
  full_name='yandex.cloud.mdb.mysql.v1.HostSpec',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='zone_id', full_name='yandex.cloud.mdb.mysql.v1.HostSpec.zone_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      # serialized_options carries Yandex Cloud validation extensions
      # (presumably a "length <= 50" constraint, per the embedded '<=50' bytes).
      serialized_options=b'\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='subnet_id', full_name='yandex.cloud.mdb.mysql.v1.HostSpec.subnet_id', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\212\3101\004<=50', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='assign_public_ip', full_name='yandex.cloud.mdb.mysql.v1.HostSpec.assign_public_ip', index=2,
      number=3, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='replication_source', full_name='yandex.cloud.mdb.mysql.v1.HostSpec.replication_source', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='backup_priority', full_name='yandex.cloud.mdb.mysql.v1.HostSpec.backup_priority', index=4,
      number=5, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      # Embedded '0-100' bytes suggest a 0-100 range constraint — confirm against the .proto.
      serialized_options=b'\372\3071\0050-100', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='priority', full_name='yandex.cloud.mdb.mysql.v1.HostSpec.priority', index=5,
      number=6, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\372\3071\0050-100', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=6967,
  serialized_end=7152,
)
# Descriptor for the ConfigSpec message (yandex.cloud.mdb.mysql.v1.ConfigSpec):
# cluster-level configuration (MySQL version, engine config, resources,
# backup window, access settings, performance diagnostics).
# The mysql_config_5_7 / mysql_config_8_0 fields are members of the
# 'mysql_config' oneof declared below (wired up after all descriptors exist).
# NOTE(review): protoc-generated code — do not edit by hand.
_CONFIGSPEC = _descriptor.Descriptor(
  name='ConfigSpec',
  full_name='yandex.cloud.mdb.mysql.v1.ConfigSpec',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='version', full_name='yandex.cloud.mdb.mysql.v1.ConfigSpec.version', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='mysql_config_5_7', full_name='yandex.cloud.mdb.mysql.v1.ConfigSpec.mysql_config_5_7', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      # Explicit json_name preserves the camelCase-with-underscores JSON mapping.
      serialized_options=None, json_name='mysqlConfig_5_7', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='mysql_config_8_0', full_name='yandex.cloud.mdb.mysql.v1.ConfigSpec.mysql_config_8_0', index=2,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='mysqlConfig_8_0', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='resources', full_name='yandex.cloud.mdb.mysql.v1.ConfigSpec.resources', index=3,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='backup_window_start', full_name='yandex.cloud.mdb.mysql.v1.ConfigSpec.backup_window_start', index=4,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='access', full_name='yandex.cloud.mdb.mysql.v1.ConfigSpec.access', index=5,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='performance_diagnostics', full_name='yandex.cloud.mdb.mysql.v1.ConfigSpec.performance_diagnostics', index=6,
      number=7, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
    _descriptor.OneofDescriptor(
      name='mysql_config', full_name='yandex.cloud.mdb.mysql.v1.ConfigSpec.mysql_config',
      index=0, containing_type=None,
      create_key=_descriptor._internal_create_key,
      fields=[]),
  ],
  serialized_start=7155,
  serialized_end=7635,
)
# --- Descriptor cross-linking (protoc-generated; order-dependent) ---
# Now that every Descriptor object exists, resolve message/enum field types,
# set containing_type on nested map-entry messages, and populate oneofs.
# Types prefixed yandex_dot_... / google_dot_... come from the imported
# dependency modules.
_LISTCLUSTERSRESPONSE.fields_by_name['clusters'].message_type = yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__pb2._CLUSTER
_CREATECLUSTERREQUEST_LABELSENTRY.containing_type = _CREATECLUSTERREQUEST
_CREATECLUSTERREQUEST.fields_by_name['labels'].message_type = _CREATECLUSTERREQUEST_LABELSENTRY
_CREATECLUSTERREQUEST.fields_by_name['environment'].enum_type = yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__pb2._CLUSTER_ENVIRONMENT
_CREATECLUSTERREQUEST.fields_by_name['config_spec'].message_type = _CONFIGSPEC
_CREATECLUSTERREQUEST.fields_by_name['database_specs'].message_type = yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_database__pb2._DATABASESPEC
_CREATECLUSTERREQUEST.fields_by_name['user_specs'].message_type = yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_user__pb2._USERSPEC
_CREATECLUSTERREQUEST.fields_by_name['host_specs'].message_type = _HOSTSPEC
_UPDATECLUSTERREQUEST_LABELSENTRY.containing_type = _UPDATECLUSTERREQUEST
_UPDATECLUSTERREQUEST.fields_by_name['update_mask'].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_UPDATECLUSTERREQUEST.fields_by_name['labels'].message_type = _UPDATECLUSTERREQUEST_LABELSENTRY
_UPDATECLUSTERREQUEST.fields_by_name['config_spec'].message_type = _CONFIGSPEC
_UPDATECLUSTERREQUEST.fields_by_name['maintenance_window'].message_type = yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_maintenance__pb2._MAINTENANCEWINDOW
_RESTORECLUSTERREQUEST_LABELSENTRY.containing_type = _RESTORECLUSTERREQUEST
_RESTORECLUSTERREQUEST.fields_by_name['time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_RESTORECLUSTERREQUEST.fields_by_name['labels'].message_type = _RESTORECLUSTERREQUEST_LABELSENTRY
_RESTORECLUSTERREQUEST.fields_by_name['environment'].enum_type = yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__pb2._CLUSTER_ENVIRONMENT
_RESTORECLUSTERREQUEST.fields_by_name['config_spec'].message_type = _CONFIGSPEC
_RESTORECLUSTERREQUEST.fields_by_name['host_specs'].message_type = _HOSTSPEC
_RESCHEDULEMAINTENANCEREQUEST.fields_by_name['reschedule_type'].enum_type = _RESCHEDULEMAINTENANCEREQUEST_RESCHEDULETYPE
_RESCHEDULEMAINTENANCEREQUEST.fields_by_name['delayed_until'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_RESCHEDULEMAINTENANCEREQUEST_RESCHEDULETYPE.containing_type = _RESCHEDULEMAINTENANCEREQUEST
_RESCHEDULEMAINTENANCEMETADATA.fields_by_name['delayed_until'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_LOGRECORD_MESSAGEENTRY.containing_type = _LOGRECORD
_LOGRECORD.fields_by_name['timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_LOGRECORD.fields_by_name['message'].message_type = _LOGRECORD_MESSAGEENTRY
_LISTCLUSTERLOGSREQUEST.fields_by_name['service_type'].enum_type = _LISTCLUSTERLOGSREQUEST_SERVICETYPE
_LISTCLUSTERLOGSREQUEST.fields_by_name['from_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_LISTCLUSTERLOGSREQUEST.fields_by_name['to_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_LISTCLUSTERLOGSREQUEST_SERVICETYPE.containing_type = _LISTCLUSTERLOGSREQUEST
_LISTCLUSTERLOGSRESPONSE.fields_by_name['logs'].message_type = _LOGRECORD
_STREAMLOGRECORD.fields_by_name['record'].message_type = _LOGRECORD
_STREAMCLUSTERLOGSREQUEST.fields_by_name['service_type'].enum_type = _STREAMCLUSTERLOGSREQUEST_SERVICETYPE
_STREAMCLUSTERLOGSREQUEST.fields_by_name['from_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_STREAMCLUSTERLOGSREQUEST.fields_by_name['to_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_STREAMCLUSTERLOGSREQUEST_SERVICETYPE.containing_type = _STREAMCLUSTERLOGSREQUEST
_LISTCLUSTEROPERATIONSRESPONSE.fields_by_name['operations'].message_type = yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION
_LISTCLUSTERBACKUPSRESPONSE.fields_by_name['backups'].message_type = yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_backup__pb2._BACKUP
_LISTCLUSTERHOSTSRESPONSE.fields_by_name['hosts'].message_type = yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__pb2._HOST
_ADDCLUSTERHOSTSREQUEST.fields_by_name['host_specs'].message_type = _HOSTSPEC
_UPDATECLUSTERHOSTSREQUEST.fields_by_name['update_host_specs'].message_type = _UPDATEHOSTSPEC
_UPDATEHOSTSPEC.fields_by_name['update_mask'].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_CONFIGSPEC.fields_by_name['mysql_config_5_7'].message_type = yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_config_dot_mysql5__7__pb2._MYSQLCONFIG5_7
_CONFIGSPEC.fields_by_name['mysql_config_8_0'].message_type = yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_config_dot_mysql8__0__pb2._MYSQLCONFIG8_0
_CONFIGSPEC.fields_by_name['resources'].message_type = yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__pb2._RESOURCES
_CONFIGSPEC.fields_by_name['backup_window_start'].message_type = google_dot_type_dot_timeofday__pb2._TIMEOFDAY
_CONFIGSPEC.fields_by_name['access'].message_type = yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__pb2._ACCESS
_CONFIGSPEC.fields_by_name['performance_diagnostics'].message_type = yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__pb2._PERFORMANCEDIAGNOSTICS
# Attach both mysql_config_* fields to the 'mysql_config' oneof (mutually
# exclusive at runtime; setting one clears the other).
_CONFIGSPEC.oneofs_by_name['mysql_config'].fields.append(
  _CONFIGSPEC.fields_by_name['mysql_config_5_7'])
_CONFIGSPEC.fields_by_name['mysql_config_5_7'].containing_oneof = _CONFIGSPEC.oneofs_by_name['mysql_config']
_CONFIGSPEC.oneofs_by_name['mysql_config'].fields.append(
  _CONFIGSPEC.fields_by_name['mysql_config_8_0'])
_CONFIGSPEC.fields_by_name['mysql_config_8_0'].containing_oneof = _CONFIGSPEC.oneofs_by_name['mysql_config']
# --- File-level registration (protoc-generated) ---
# Expose every top-level message descriptor by its .proto name, then register
# the whole file with the default symbol database so the descriptor pool can
# resolve these types by full name.
DESCRIPTOR.message_types_by_name['GetClusterRequest'] = _GETCLUSTERREQUEST
DESCRIPTOR.message_types_by_name['ListClustersRequest'] = _LISTCLUSTERSREQUEST
DESCRIPTOR.message_types_by_name['ListClustersResponse'] = _LISTCLUSTERSRESPONSE
DESCRIPTOR.message_types_by_name['CreateClusterRequest'] = _CREATECLUSTERREQUEST
DESCRIPTOR.message_types_by_name['CreateClusterMetadata'] = _CREATECLUSTERMETADATA
DESCRIPTOR.message_types_by_name['UpdateClusterRequest'] = _UPDATECLUSTERREQUEST
DESCRIPTOR.message_types_by_name['UpdateClusterMetadata'] = _UPDATECLUSTERMETADATA
DESCRIPTOR.message_types_by_name['DeleteClusterRequest'] = _DELETECLUSTERREQUEST
DESCRIPTOR.message_types_by_name['DeleteClusterMetadata'] = _DELETECLUSTERMETADATA
DESCRIPTOR.message_types_by_name['BackupClusterRequest'] = _BACKUPCLUSTERREQUEST
DESCRIPTOR.message_types_by_name['BackupClusterMetadata'] = _BACKUPCLUSTERMETADATA
DESCRIPTOR.message_types_by_name['RestoreClusterRequest'] = _RESTORECLUSTERREQUEST
DESCRIPTOR.message_types_by_name['RestoreClusterMetadata'] = _RESTORECLUSTERMETADATA
DESCRIPTOR.message_types_by_name['StartClusterFailoverRequest'] = _STARTCLUSTERFAILOVERREQUEST
DESCRIPTOR.message_types_by_name['StartClusterFailoverMetadata'] = _STARTCLUSTERFAILOVERMETADATA
DESCRIPTOR.message_types_by_name['RescheduleMaintenanceRequest'] = _RESCHEDULEMAINTENANCEREQUEST
DESCRIPTOR.message_types_by_name['RescheduleMaintenanceMetadata'] = _RESCHEDULEMAINTENANCEMETADATA
DESCRIPTOR.message_types_by_name['LogRecord'] = _LOGRECORD
DESCRIPTOR.message_types_by_name['ListClusterLogsRequest'] = _LISTCLUSTERLOGSREQUEST
DESCRIPTOR.message_types_by_name['ListClusterLogsResponse'] = _LISTCLUSTERLOGSRESPONSE
DESCRIPTOR.message_types_by_name['StreamLogRecord'] = _STREAMLOGRECORD
DESCRIPTOR.message_types_by_name['StreamClusterLogsRequest'] = _STREAMCLUSTERLOGSREQUEST
DESCRIPTOR.message_types_by_name['ListClusterOperationsRequest'] = _LISTCLUSTEROPERATIONSREQUEST
DESCRIPTOR.message_types_by_name['ListClusterOperationsResponse'] = _LISTCLUSTEROPERATIONSRESPONSE
DESCRIPTOR.message_types_by_name['ListClusterBackupsRequest'] = _LISTCLUSTERBACKUPSREQUEST
DESCRIPTOR.message_types_by_name['ListClusterBackupsResponse'] = _LISTCLUSTERBACKUPSRESPONSE
DESCRIPTOR.message_types_by_name['ListClusterHostsRequest'] = _LISTCLUSTERHOSTSREQUEST
DESCRIPTOR.message_types_by_name['ListClusterHostsResponse'] = _LISTCLUSTERHOSTSRESPONSE
DESCRIPTOR.message_types_by_name['AddClusterHostsRequest'] = _ADDCLUSTERHOSTSREQUEST
DESCRIPTOR.message_types_by_name['AddClusterHostsMetadata'] = _ADDCLUSTERHOSTSMETADATA
DESCRIPTOR.message_types_by_name['DeleteClusterHostsRequest'] = _DELETECLUSTERHOSTSREQUEST
DESCRIPTOR.message_types_by_name['DeleteClusterHostsMetadata'] = _DELETECLUSTERHOSTSMETADATA
DESCRIPTOR.message_types_by_name['StartClusterRequest'] = _STARTCLUSTERREQUEST
DESCRIPTOR.message_types_by_name['StartClusterMetadata'] = _STARTCLUSTERMETADATA
DESCRIPTOR.message_types_by_name['StopClusterRequest'] = _STOPCLUSTERREQUEST
DESCRIPTOR.message_types_by_name['StopClusterMetadata'] = _STOPCLUSTERMETADATA
DESCRIPTOR.message_types_by_name['MoveClusterRequest'] = _MOVECLUSTERREQUEST
DESCRIPTOR.message_types_by_name['MoveClusterMetadata'] = _MOVECLUSTERMETADATA
DESCRIPTOR.message_types_by_name['UpdateClusterHostsRequest'] = _UPDATECLUSTERHOSTSREQUEST
DESCRIPTOR.message_types_by_name['UpdateClusterHostsMetadata'] = _UPDATECLUSTERHOSTSMETADATA
DESCRIPTOR.message_types_by_name['UpdateHostSpec'] = _UPDATEHOSTSPEC
DESCRIPTOR.message_types_by_name['HostSpec'] = _HOSTSPEC
DESCRIPTOR.message_types_by_name['ConfigSpec'] = _CONFIGSPEC
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# --- Concrete message classes (protoc-generated) ---
# Build the public Message subclasses via the reflection metaclass: each class
# is created from its Descriptor plus __module__, then registered with the
# symbol database. Map fields (labels, log-record message) get a nested
# *Entry class. The @@protoc_insertion_point comments are markers protoc
# plugins rely on — keep them intact.
GetClusterRequest = _reflection.GeneratedProtocolMessageType('GetClusterRequest', (_message.Message,), {
  'DESCRIPTOR' : _GETCLUSTERREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.GetClusterRequest)
  })
_sym_db.RegisterMessage(GetClusterRequest)
ListClustersRequest = _reflection.GeneratedProtocolMessageType('ListClustersRequest', (_message.Message,), {
  'DESCRIPTOR' : _LISTCLUSTERSREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.ListClustersRequest)
  })
_sym_db.RegisterMessage(ListClustersRequest)
ListClustersResponse = _reflection.GeneratedProtocolMessageType('ListClustersResponse', (_message.Message,), {
  'DESCRIPTOR' : _LISTCLUSTERSRESPONSE,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.ListClustersResponse)
  })
_sym_db.RegisterMessage(ListClustersResponse)
CreateClusterRequest = _reflection.GeneratedProtocolMessageType('CreateClusterRequest', (_message.Message,), {

  'LabelsEntry' : _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), {
    'DESCRIPTOR' : _CREATECLUSTERREQUEST_LABELSENTRY,
    '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
    # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.CreateClusterRequest.LabelsEntry)
    })
  ,
  'DESCRIPTOR' : _CREATECLUSTERREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.CreateClusterRequest)
  })
_sym_db.RegisterMessage(CreateClusterRequest)
_sym_db.RegisterMessage(CreateClusterRequest.LabelsEntry)
CreateClusterMetadata = _reflection.GeneratedProtocolMessageType('CreateClusterMetadata', (_message.Message,), {
  'DESCRIPTOR' : _CREATECLUSTERMETADATA,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.CreateClusterMetadata)
  })
_sym_db.RegisterMessage(CreateClusterMetadata)
UpdateClusterRequest = _reflection.GeneratedProtocolMessageType('UpdateClusterRequest', (_message.Message,), {

  'LabelsEntry' : _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), {
    'DESCRIPTOR' : _UPDATECLUSTERREQUEST_LABELSENTRY,
    '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
    # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.UpdateClusterRequest.LabelsEntry)
    })
  ,
  'DESCRIPTOR' : _UPDATECLUSTERREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.UpdateClusterRequest)
  })
_sym_db.RegisterMessage(UpdateClusterRequest)
_sym_db.RegisterMessage(UpdateClusterRequest.LabelsEntry)
UpdateClusterMetadata = _reflection.GeneratedProtocolMessageType('UpdateClusterMetadata', (_message.Message,), {
  'DESCRIPTOR' : _UPDATECLUSTERMETADATA,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.UpdateClusterMetadata)
  })
_sym_db.RegisterMessage(UpdateClusterMetadata)
DeleteClusterRequest = _reflection.GeneratedProtocolMessageType('DeleteClusterRequest', (_message.Message,), {
  'DESCRIPTOR' : _DELETECLUSTERREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.DeleteClusterRequest)
  })
_sym_db.RegisterMessage(DeleteClusterRequest)
DeleteClusterMetadata = _reflection.GeneratedProtocolMessageType('DeleteClusterMetadata', (_message.Message,), {
  'DESCRIPTOR' : _DELETECLUSTERMETADATA,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.DeleteClusterMetadata)
  })
_sym_db.RegisterMessage(DeleteClusterMetadata)
BackupClusterRequest = _reflection.GeneratedProtocolMessageType('BackupClusterRequest', (_message.Message,), {
  'DESCRIPTOR' : _BACKUPCLUSTERREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.BackupClusterRequest)
  })
_sym_db.RegisterMessage(BackupClusterRequest)
BackupClusterMetadata = _reflection.GeneratedProtocolMessageType('BackupClusterMetadata', (_message.Message,), {
  'DESCRIPTOR' : _BACKUPCLUSTERMETADATA,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.BackupClusterMetadata)
  })
_sym_db.RegisterMessage(BackupClusterMetadata)
RestoreClusterRequest = _reflection.GeneratedProtocolMessageType('RestoreClusterRequest', (_message.Message,), {

  'LabelsEntry' : _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), {
    'DESCRIPTOR' : _RESTORECLUSTERREQUEST_LABELSENTRY,
    '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
    # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.RestoreClusterRequest.LabelsEntry)
    })
  ,
  'DESCRIPTOR' : _RESTORECLUSTERREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.RestoreClusterRequest)
  })
_sym_db.RegisterMessage(RestoreClusterRequest)
_sym_db.RegisterMessage(RestoreClusterRequest.LabelsEntry)
RestoreClusterMetadata = _reflection.GeneratedProtocolMessageType('RestoreClusterMetadata', (_message.Message,), {
  'DESCRIPTOR' : _RESTORECLUSTERMETADATA,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.RestoreClusterMetadata)
  })
_sym_db.RegisterMessage(RestoreClusterMetadata)
StartClusterFailoverRequest = _reflection.GeneratedProtocolMessageType('StartClusterFailoverRequest', (_message.Message,), {
  'DESCRIPTOR' : _STARTCLUSTERFAILOVERREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.StartClusterFailoverRequest)
  })
_sym_db.RegisterMessage(StartClusterFailoverRequest)
StartClusterFailoverMetadata = _reflection.GeneratedProtocolMessageType('StartClusterFailoverMetadata', (_message.Message,), {
  'DESCRIPTOR' : _STARTCLUSTERFAILOVERMETADATA,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.StartClusterFailoverMetadata)
  })
_sym_db.RegisterMessage(StartClusterFailoverMetadata)
RescheduleMaintenanceRequest = _reflection.GeneratedProtocolMessageType('RescheduleMaintenanceRequest', (_message.Message,), {
  'DESCRIPTOR' : _RESCHEDULEMAINTENANCEREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.RescheduleMaintenanceRequest)
  })
_sym_db.RegisterMessage(RescheduleMaintenanceRequest)
RescheduleMaintenanceMetadata = _reflection.GeneratedProtocolMessageType('RescheduleMaintenanceMetadata', (_message.Message,), {
  'DESCRIPTOR' : _RESCHEDULEMAINTENANCEMETADATA,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.RescheduleMaintenanceMetadata)
  })
_sym_db.RegisterMessage(RescheduleMaintenanceMetadata)
LogRecord = _reflection.GeneratedProtocolMessageType('LogRecord', (_message.Message,), {

  'MessageEntry' : _reflection.GeneratedProtocolMessageType('MessageEntry', (_message.Message,), {
    'DESCRIPTOR' : _LOGRECORD_MESSAGEENTRY,
    '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
    # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.LogRecord.MessageEntry)
    })
  ,
  'DESCRIPTOR' : _LOGRECORD,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.LogRecord)
  })
_sym_db.RegisterMessage(LogRecord)
_sym_db.RegisterMessage(LogRecord.MessageEntry)
ListClusterLogsRequest = _reflection.GeneratedProtocolMessageType('ListClusterLogsRequest', (_message.Message,), {
  'DESCRIPTOR' : _LISTCLUSTERLOGSREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.ListClusterLogsRequest)
  })
_sym_db.RegisterMessage(ListClusterLogsRequest)
ListClusterLogsResponse = _reflection.GeneratedProtocolMessageType('ListClusterLogsResponse', (_message.Message,), {
  'DESCRIPTOR' : _LISTCLUSTERLOGSRESPONSE,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.ListClusterLogsResponse)
  })
_sym_db.RegisterMessage(ListClusterLogsResponse)
StreamLogRecord = _reflection.GeneratedProtocolMessageType('StreamLogRecord', (_message.Message,), {
  'DESCRIPTOR' : _STREAMLOGRECORD,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.StreamLogRecord)
  })
_sym_db.RegisterMessage(StreamLogRecord)
StreamClusterLogsRequest = _reflection.GeneratedProtocolMessageType('StreamClusterLogsRequest', (_message.Message,), {
  'DESCRIPTOR' : _STREAMCLUSTERLOGSREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.StreamClusterLogsRequest)
  })
_sym_db.RegisterMessage(StreamClusterLogsRequest)
ListClusterOperationsRequest = _reflection.GeneratedProtocolMessageType('ListClusterOperationsRequest', (_message.Message,), {
  'DESCRIPTOR' : _LISTCLUSTEROPERATIONSREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.ListClusterOperationsRequest)
  })
_sym_db.RegisterMessage(ListClusterOperationsRequest)
ListClusterOperationsResponse = _reflection.GeneratedProtocolMessageType('ListClusterOperationsResponse', (_message.Message,), {
  'DESCRIPTOR' : _LISTCLUSTEROPERATIONSRESPONSE,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.ListClusterOperationsResponse)
  })
_sym_db.RegisterMessage(ListClusterOperationsResponse)
ListClusterBackupsRequest = _reflection.GeneratedProtocolMessageType('ListClusterBackupsRequest', (_message.Message,), {
  'DESCRIPTOR' : _LISTCLUSTERBACKUPSREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.ListClusterBackupsRequest)
  })
_sym_db.RegisterMessage(ListClusterBackupsRequest)
ListClusterBackupsResponse = _reflection.GeneratedProtocolMessageType('ListClusterBackupsResponse', (_message.Message,), {
  'DESCRIPTOR' : _LISTCLUSTERBACKUPSRESPONSE,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.ListClusterBackupsResponse)
  })
_sym_db.RegisterMessage(ListClusterBackupsResponse)
ListClusterHostsRequest = _reflection.GeneratedProtocolMessageType('ListClusterHostsRequest', (_message.Message,), {
  'DESCRIPTOR' : _LISTCLUSTERHOSTSREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.ListClusterHostsRequest)
  })
_sym_db.RegisterMessage(ListClusterHostsRequest)
ListClusterHostsResponse = _reflection.GeneratedProtocolMessageType('ListClusterHostsResponse', (_message.Message,), {
  'DESCRIPTOR' : _LISTCLUSTERHOSTSRESPONSE,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.ListClusterHostsResponse)
  })
_sym_db.RegisterMessage(ListClusterHostsResponse)
AddClusterHostsRequest = _reflection.GeneratedProtocolMessageType('AddClusterHostsRequest', (_message.Message,), {
  'DESCRIPTOR' : _ADDCLUSTERHOSTSREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.AddClusterHostsRequest)
  })
_sym_db.RegisterMessage(AddClusterHostsRequest)
AddClusterHostsMetadata = _reflection.GeneratedProtocolMessageType('AddClusterHostsMetadata', (_message.Message,), {
  'DESCRIPTOR' : _ADDCLUSTERHOSTSMETADATA,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.AddClusterHostsMetadata)
  })
_sym_db.RegisterMessage(AddClusterHostsMetadata)
DeleteClusterHostsRequest = _reflection.GeneratedProtocolMessageType('DeleteClusterHostsRequest', (_message.Message,), {
  'DESCRIPTOR' : _DELETECLUSTERHOSTSREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.DeleteClusterHostsRequest)
  })
_sym_db.RegisterMessage(DeleteClusterHostsRequest)
DeleteClusterHostsMetadata = _reflection.GeneratedProtocolMessageType('DeleteClusterHostsMetadata', (_message.Message,), {
  'DESCRIPTOR' : _DELETECLUSTERHOSTSMETADATA,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.DeleteClusterHostsMetadata)
  })
_sym_db.RegisterMessage(DeleteClusterHostsMetadata)
StartClusterRequest = _reflection.GeneratedProtocolMessageType('StartClusterRequest', (_message.Message,), {
  'DESCRIPTOR' : _STARTCLUSTERREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.StartClusterRequest)
  })
_sym_db.RegisterMessage(StartClusterRequest)
StartClusterMetadata = _reflection.GeneratedProtocolMessageType('StartClusterMetadata', (_message.Message,), {
  'DESCRIPTOR' : _STARTCLUSTERMETADATA,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.StartClusterMetadata)
  })
_sym_db.RegisterMessage(StartClusterMetadata)
StopClusterRequest = _reflection.GeneratedProtocolMessageType('StopClusterRequest', (_message.Message,), {
  'DESCRIPTOR' : _STOPCLUSTERREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.StopClusterRequest)
  })
_sym_db.RegisterMessage(StopClusterRequest)
StopClusterMetadata = _reflection.GeneratedProtocolMessageType('StopClusterMetadata', (_message.Message,), {
  'DESCRIPTOR' : _STOPCLUSTERMETADATA,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.StopClusterMetadata)
  })
_sym_db.RegisterMessage(StopClusterMetadata)
MoveClusterRequest = _reflection.GeneratedProtocolMessageType('MoveClusterRequest', (_message.Message,), {
  'DESCRIPTOR' : _MOVECLUSTERREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.MoveClusterRequest)
  })
_sym_db.RegisterMessage(MoveClusterRequest)
MoveClusterMetadata = _reflection.GeneratedProtocolMessageType('MoveClusterMetadata', (_message.Message,), {
  'DESCRIPTOR' : _MOVECLUSTERMETADATA,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.MoveClusterMetadata)
  })
_sym_db.RegisterMessage(MoveClusterMetadata)
UpdateClusterHostsRequest = _reflection.GeneratedProtocolMessageType('UpdateClusterHostsRequest', (_message.Message,), {
  'DESCRIPTOR' : _UPDATECLUSTERHOSTSREQUEST,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.UpdateClusterHostsRequest)
  })
_sym_db.RegisterMessage(UpdateClusterHostsRequest)
UpdateClusterHostsMetadata = _reflection.GeneratedProtocolMessageType('UpdateClusterHostsMetadata', (_message.Message,), {
  'DESCRIPTOR' : _UPDATECLUSTERHOSTSMETADATA,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.UpdateClusterHostsMetadata)
  })
_sym_db.RegisterMessage(UpdateClusterHostsMetadata)
UpdateHostSpec = _reflection.GeneratedProtocolMessageType('UpdateHostSpec', (_message.Message,), {
  'DESCRIPTOR' : _UPDATEHOSTSPEC,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.UpdateHostSpec)
  })
_sym_db.RegisterMessage(UpdateHostSpec)
HostSpec = _reflection.GeneratedProtocolMessageType('HostSpec', (_message.Message,), {
  'DESCRIPTOR' : _HOSTSPEC,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.HostSpec)
  })
_sym_db.RegisterMessage(HostSpec)
ConfigSpec = _reflection.GeneratedProtocolMessageType('ConfigSpec', (_message.Message,), {
  'DESCRIPTOR' : _CONFIGSPEC,
  '__module__' : 'yandex.cloud.mdb.mysql.v1.cluster_service_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.mdb.mysql.v1.ConfigSpec)
  })
_sym_db.RegisterMessage(ConfigSpec)
# NOTE(review): generated protoc boilerplate. Setting `_options = None` on the
# file and field descriptors discards the cached serialized options blobs
# (validation annotations from the .proto) after descriptor construction —
# presumably to let the already-parsed options take effect; regenerate rather
# than hand-edit.
DESCRIPTOR._options = None
_GETCLUSTERREQUEST.fields_by_name['cluster_id']._options = None
_LISTCLUSTERSREQUEST.fields_by_name['folder_id']._options = None
_LISTCLUSTERSREQUEST.fields_by_name['page_size']._options = None
_LISTCLUSTERSREQUEST.fields_by_name['page_token']._options = None
_LISTCLUSTERSREQUEST.fields_by_name['filter']._options = None
_CREATECLUSTERREQUEST_LABELSENTRY._options = None
_CREATECLUSTERREQUEST.fields_by_name['folder_id']._options = None
_CREATECLUSTERREQUEST.fields_by_name['name']._options = None
_CREATECLUSTERREQUEST.fields_by_name['description']._options = None
_CREATECLUSTERREQUEST.fields_by_name['labels']._options = None
_CREATECLUSTERREQUEST.fields_by_name['network_id']._options = None
_UPDATECLUSTERREQUEST_LABELSENTRY._options = None
_UPDATECLUSTERREQUEST.fields_by_name['cluster_id']._options = None
_UPDATECLUSTERREQUEST.fields_by_name['description']._options = None
_UPDATECLUSTERREQUEST.fields_by_name['labels']._options = None
_UPDATECLUSTERREQUEST.fields_by_name['name']._options = None
_DELETECLUSTERREQUEST.fields_by_name['cluster_id']._options = None
_BACKUPCLUSTERREQUEST.fields_by_name['cluster_id']._options = None
_RESTORECLUSTERREQUEST_LABELSENTRY._options = None
_RESTORECLUSTERREQUEST.fields_by_name['backup_id']._options = None
_RESTORECLUSTERREQUEST.fields_by_name['time']._options = None
_RESTORECLUSTERREQUEST.fields_by_name['name']._options = None
_RESTORECLUSTERREQUEST.fields_by_name['description']._options = None
_RESTORECLUSTERREQUEST.fields_by_name['labels']._options = None
_RESTORECLUSTERREQUEST.fields_by_name['network_id']._options = None
_RESTORECLUSTERREQUEST.fields_by_name['folder_id']._options = None
_STARTCLUSTERFAILOVERREQUEST.fields_by_name['cluster_id']._options = None
_STARTCLUSTERFAILOVERREQUEST.fields_by_name['host_name']._options = None
_RESCHEDULEMAINTENANCEREQUEST.fields_by_name['cluster_id']._options = None
_RESCHEDULEMAINTENANCEREQUEST.fields_by_name['reschedule_type']._options = None
_LOGRECORD_MESSAGEENTRY._options = None
_LISTCLUSTERLOGSREQUEST.fields_by_name['cluster_id']._options = None
_LISTCLUSTERLOGSREQUEST.fields_by_name['page_size']._options = None
_LISTCLUSTERLOGSREQUEST.fields_by_name['page_token']._options = None
_STREAMCLUSTERLOGSREQUEST.fields_by_name['cluster_id']._options = None
_STREAMCLUSTERLOGSREQUEST.fields_by_name['record_token']._options = None
_STREAMCLUSTERLOGSREQUEST.fields_by_name['filter']._options = None
_LISTCLUSTEROPERATIONSREQUEST.fields_by_name['cluster_id']._options = None
_LISTCLUSTEROPERATIONSREQUEST.fields_by_name['page_size']._options = None
_LISTCLUSTEROPERATIONSREQUEST.fields_by_name['page_token']._options = None
_LISTCLUSTERBACKUPSREQUEST.fields_by_name['cluster_id']._options = None
_LISTCLUSTERBACKUPSREQUEST.fields_by_name['page_size']._options = None
_LISTCLUSTERBACKUPSREQUEST.fields_by_name['page_token']._options = None
_LISTCLUSTERHOSTSREQUEST.fields_by_name['cluster_id']._options = None
_LISTCLUSTERHOSTSREQUEST.fields_by_name['page_size']._options = None
_LISTCLUSTERHOSTSREQUEST.fields_by_name['page_token']._options = None
_ADDCLUSTERHOSTSREQUEST.fields_by_name['cluster_id']._options = None
_ADDCLUSTERHOSTSREQUEST.fields_by_name['host_specs']._options = None
_DELETECLUSTERHOSTSREQUEST.fields_by_name['cluster_id']._options = None
_DELETECLUSTERHOSTSREQUEST.fields_by_name['host_names']._options = None
_STARTCLUSTERREQUEST.fields_by_name['cluster_id']._options = None
_STOPCLUSTERREQUEST.fields_by_name['cluster_id']._options = None
_MOVECLUSTERREQUEST.fields_by_name['cluster_id']._options = None
_MOVECLUSTERREQUEST.fields_by_name['destination_folder_id']._options = None
_UPDATECLUSTERHOSTSREQUEST.fields_by_name['cluster_id']._options = None
_UPDATECLUSTERHOSTSREQUEST.fields_by_name['update_host_specs']._options = None
_UPDATEHOSTSPEC.fields_by_name['host_name']._options = None
_UPDATEHOSTSPEC.fields_by_name['backup_priority']._options = None
_UPDATEHOSTSPEC.fields_by_name['priority']._options = None
_HOSTSPEC.fields_by_name['zone_id']._options = None
_HOSTSPEC.fields_by_name['subnet_id']._options = None
_HOSTSPEC.fields_by_name['backup_priority']._options = None
_HOSTSPEC.fields_by_name['priority']._options = None
# NOTE(review): generated protoc boilerplate describing the ClusterService
# gRPC service: one MethodDescriptor per RPC, each carrying the request/response
# descriptors and serialized method options (the escaped byte strings encode
# the google.api.http REST mappings and operation metadata from the .proto).
# Regenerate from the .proto rather than editing by hand.
_CLUSTERSERVICE = _descriptor.ServiceDescriptor(
  name='ClusterService',
  full_name='yandex.cloud.mdb.mysql.v1.ClusterService',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_start=7638,
  serialized_end=11424,
  methods=[
  _descriptor.MethodDescriptor(
    name='Get',
    full_name='yandex.cloud.mdb.mysql.v1.ClusterService.Get',
    index=0,
    containing_service=None,
    input_type=_GETCLUSTERREQUEST,
    output_type=yandex_dot_cloud_dot_mdb_dot_mysql_dot_v1_dot_cluster__pb2._CLUSTER,
    serialized_options=b'\202\323\344\223\002)\022\'/managed-mysql/v1/clusters/{cluster_id}',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='List',
    full_name='yandex.cloud.mdb.mysql.v1.ClusterService.List',
    index=1,
    containing_service=None,
    input_type=_LISTCLUSTERSREQUEST,
    output_type=_LISTCLUSTERSRESPONSE,
    serialized_options=b'\202\323\344\223\002\034\022\032/managed-mysql/v1/clusters',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='Create',
    full_name='yandex.cloud.mdb.mysql.v1.ClusterService.Create',
    index=2,
    containing_service=None,
    input_type=_CREATECLUSTERREQUEST,
    output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
    serialized_options=b'\202\323\344\223\002\037\"\032/managed-mysql/v1/clusters:\001*\262\322* \n\025CreateClusterMetadata\022\007Cluster',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='Update',
    full_name='yandex.cloud.mdb.mysql.v1.ClusterService.Update',
    index=3,
    containing_service=None,
    input_type=_UPDATECLUSTERREQUEST,
    output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
    serialized_options=b'\202\323\344\223\002,2\'/managed-mysql/v1/clusters/{cluster_id}:\001*\262\322* \n\025UpdateClusterMetadata\022\007Cluster',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='Delete',
    full_name='yandex.cloud.mdb.mysql.v1.ClusterService.Delete',
    index=4,
    containing_service=None,
    input_type=_DELETECLUSTERREQUEST,
    output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
    serialized_options=b'\202\323\344\223\002)*\'/managed-mysql/v1/clusters/{cluster_id}\262\322*.\n\025DeleteClusterMetadata\022\025google.protobuf.Empty',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='Start',
    full_name='yandex.cloud.mdb.mysql.v1.ClusterService.Start',
    index=5,
    containing_service=None,
    input_type=_STARTCLUSTERREQUEST,
    output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
    serialized_options=b'\202\323\344\223\002/\"-/managed-mysql/v1/clusters/{cluster_id}:start\262\322*\037\n\024StartClusterMetadata\022\007Cluster',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='Stop',
    full_name='yandex.cloud.mdb.mysql.v1.ClusterService.Stop',
    index=6,
    containing_service=None,
    input_type=_STOPCLUSTERREQUEST,
    output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
    serialized_options=b'\202\323\344\223\002.\",/managed-mysql/v1/clusters/{cluster_id}:stop\262\322*\036\n\023StopClusterMetadata\022\007Cluster',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='Move',
    full_name='yandex.cloud.mdb.mysql.v1.ClusterService.Move',
    index=7,
    containing_service=None,
    input_type=_MOVECLUSTERREQUEST,
    output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
    serialized_options=b'\202\323\344\223\0021\",/managed-mysql/v1/clusters/{cluster_id}:move:\001*\262\322*\036\n\023MoveClusterMetadata\022\007Cluster',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='Backup',
    full_name='yandex.cloud.mdb.mysql.v1.ClusterService.Backup',
    index=8,
    containing_service=None,
    input_type=_BACKUPCLUSTERREQUEST,
    output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
    serialized_options=b'\202\323\344\223\0020\"./managed-mysql/v1/clusters/{cluster_id}:backup\262\322* \n\025BackupClusterMetadata\022\007Cluster',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='Restore',
    full_name='yandex.cloud.mdb.mysql.v1.ClusterService.Restore',
    index=9,
    containing_service=None,
    input_type=_RESTORECLUSTERREQUEST,
    output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
    serialized_options=b'\202\323\344\223\002\'\"\"/managed-mysql/v1/clusters:restore:\001*\262\322*!\n\026RestoreClusterMetadata\022\007Cluster',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='RescheduleMaintenance',
    full_name='yandex.cloud.mdb.mysql.v1.ClusterService.RescheduleMaintenance',
    index=10,
    containing_service=None,
    input_type=_RESCHEDULEMAINTENANCEREQUEST,
    output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
    serialized_options=b'\202\323\344\223\002B\"=/managed-mysql/v1/clusters/{cluster_id}:rescheduleMaintenance:\001*\262\322*(\n\035RescheduleMaintenanceMetadata\022\007Cluster',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='StartFailover',
    full_name='yandex.cloud.mdb.mysql.v1.ClusterService.StartFailover',
    index=11,
    containing_service=None,
    input_type=_STARTCLUSTERFAILOVERREQUEST,
    output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
    serialized_options=b'\202\323\344\223\002:\"5/managed-mysql/v1/clusters/{cluster_id}:startFailover:\001*\262\322*\'\n\034StartClusterFailoverMetadata\022\007Cluster',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='ListLogs',
    full_name='yandex.cloud.mdb.mysql.v1.ClusterService.ListLogs',
    index=12,
    containing_service=None,
    input_type=_LISTCLUSTERLOGSREQUEST,
    output_type=_LISTCLUSTERLOGSRESPONSE,
    serialized_options=b'\202\323\344\223\002.\022,/managed-mysql/v1/clusters/{cluster_id}:logs',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='StreamLogs',
    full_name='yandex.cloud.mdb.mysql.v1.ClusterService.StreamLogs',
    index=13,
    containing_service=None,
    input_type=_STREAMCLUSTERLOGSREQUEST,
    output_type=_STREAMLOGRECORD,
    serialized_options=b'\202\323\344\223\0025\0223/managed-mysql/v1/clusters/{cluster_id}:stream_logs',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='ListOperations',
    full_name='yandex.cloud.mdb.mysql.v1.ClusterService.ListOperations',
    index=14,
    containing_service=None,
    input_type=_LISTCLUSTEROPERATIONSREQUEST,
    output_type=_LISTCLUSTEROPERATIONSRESPONSE,
    serialized_options=b'\202\323\344\223\0024\0222/managed-mysql/v1/clusters/{cluster_id}/operations',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='ListBackups',
    full_name='yandex.cloud.mdb.mysql.v1.ClusterService.ListBackups',
    index=15,
    containing_service=None,
    input_type=_LISTCLUSTERBACKUPSREQUEST,
    output_type=_LISTCLUSTERBACKUPSRESPONSE,
    serialized_options=b'\202\323\344\223\0021\022//managed-mysql/v1/clusters/{cluster_id}/backups',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='ListHosts',
    full_name='yandex.cloud.mdb.mysql.v1.ClusterService.ListHosts',
    index=16,
    containing_service=None,
    input_type=_LISTCLUSTERHOSTSREQUEST,
    output_type=_LISTCLUSTERHOSTSRESPONSE,
    serialized_options=b'\202\323\344\223\002/\022-/managed-mysql/v1/clusters/{cluster_id}/hosts',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='AddHosts',
    full_name='yandex.cloud.mdb.mysql.v1.ClusterService.AddHosts',
    index=17,
    containing_service=None,
    input_type=_ADDCLUSTERHOSTSREQUEST,
    output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
    serialized_options=b'\202\323\344\223\002>\"9/managed-mysql/v1/clusters/{cluster_id}/hosts:batchCreate:\001*\262\322*0\n\027AddClusterHostsMetadata\022\025google.protobuf.Empty',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='UpdateHosts',
    full_name='yandex.cloud.mdb.mysql.v1.ClusterService.UpdateHosts',
    index=18,
    containing_service=None,
    input_type=_UPDATECLUSTERHOSTSREQUEST,
    output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
    serialized_options=b'\202\323\344\223\002>\"9/managed-mysql/v1/clusters/{cluster_id}/hosts:batchUpdate:\001*\262\322*3\n\032UpdateClusterHostsMetadata\022\025google.protobuf.Empty',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='DeleteHosts',
    full_name='yandex.cloud.mdb.mysql.v1.ClusterService.DeleteHosts',
    index=19,
    containing_service=None,
    input_type=_DELETECLUSTERHOSTSREQUEST,
    output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
    serialized_options=b'\202\323\344\223\002>\"9/managed-mysql/v1/clusters/{cluster_id}/hosts:batchDelete:\001*\262\322*3\n\032DeleteClusterHostsMetadata\022\025google.protobuf.Empty',
    create_key=_descriptor._internal_create_key,
  ),
])
_sym_db.RegisterServiceDescriptor(_CLUSTERSERVICE)
DESCRIPTOR.services_by_name['ClusterService'] = _CLUSTERSERVICE
# @@protoc_insertion_point(module_scope)
| true | true |
f71ec15b6669fd47e7a2e067624d3db4587a44da | 52,763 | py | Python | tensorflow/python/keras/engine/base_layer_test.py | dan-zheng/tensorflow | b4245c36bc9bcb752c0a2118728098b359fd97e2 | [
"Apache-2.0"
] | 1 | 2021-01-18T04:50:42.000Z | 2021-01-18T04:50:42.000Z | tensorflow/python/keras/engine/base_layer_test.py | shyampandey1/tensorflow | 5e04065935920b0a07175283408297e73d2191fb | [
"Apache-2.0"
] | 1 | 2019-08-19T08:03:52.000Z | 2019-08-19T08:03:52.000Z | tensorflow/python/keras/engine/base_layer_test.py | shyampandey1/tensorflow | 5e04065935920b0a07175283408297e73d2191fb | [
"Apache-2.0"
] | 1 | 2019-08-23T12:36:08.000Z | 2019-08-23T12:36:08.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TensorFlow 2.0 layer behavior."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools as it
import os
import sys
import traceback
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.keras.optimizer_v2 import rmsprop
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.layers import core as legacy_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.util import nest
class DynamicLayer(base_layer.Layer):
  """Test layer that squares its input one batch row at a time.

  `call` iterates over the input with a Python `for` loop, so the layer can
  only execute eagerly; the tests below exercise it with and without
  `dynamic=True`.
  """

  def __init__(self, dynamic=False, **kwargs):
    super(DynamicLayer, self).__init__(dynamic=dynamic, **kwargs)

  def call(self, inputs):
    batch_size = array_ops.shape(inputs)[0]
    squared = tensor_array_ops.TensorArray(dtype=dtypes.float32,
                                           size=batch_size)
    # Python-level iteration over a tensor: only valid in eager execution.
    for i, row in enumerate(inputs):
      squared = squared.write(i, math_ops.square(row))
    return squared.stack()

  def compute_output_shape(self, input_shape):
    # Squaring is elementwise, so the output shape matches the input shape.
    return input_shape
class InvalidLayer(base_layer.Layer):
  """Test layer whose forward pass always fails.

  Used to verify that an exception raised inside `call` propagates to the
  caller during layer invocation.
  """

  def call(self, inputs):
    raise ValueError('You did something wrong!')
class BaseLayerTest(keras_parameterized.TestCase):
  """Tests for v2 base `Layer` behavior: dynamic layers, weights, configs."""

  @keras_parameterized.run_with_all_model_types
  def test_dynamic_layer(self):
    """A model containing a dynamic layer reports dynamic=True and runs eagerly."""
    model = testing_utils.get_model_from_layers([DynamicLayer(dynamic=True)],
                                                input_shape=(3,))
    self.assertEqual(model.dynamic, True)
    model.compile(rmsprop.RMSprop(0.001), loss='mse')
    self.assertEqual(model.run_eagerly, True)
    model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))

  @keras_parameterized.run_with_all_model_types
  def test_dynamic_layer_error(self):
    """Tracing a control-flow layer without dynamic=True raises TypeError."""
    with self.assertRaisesRegexp(TypeError,
                                 'attempting to use Python control flow'):
      model = testing_utils.get_model_from_layers([DynamicLayer()],
                                                  input_shape=(3,))
      model.compile(rmsprop.RMSprop(0.001), loss='mse')
      model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))

  @keras_parameterized.run_with_all_model_types
  def test_dynamic_layer_error_running_in_graph_mode(self):
    """Compiling a dynamic model inside a graph scope raises ValueError."""
    with context.graph_mode():
      model = testing_utils.get_model_from_layers([DynamicLayer(dynamic=True)],
                                                  input_shape=(3,))
      self.assertEqual(model.dynamic, True)
      # But then you cannot run the model since you're in a graph scope.
      with self.assertRaisesRegexp(
          ValueError, 'You must enable eager execution'):
        model.compile(rmsprop.RMSprop(0.001), loss='mse')

  def test_manual_compute_output_shape(self):
    """Shape/signature computation builds the layer exactly once."""

    class BuildCounter(keras.layers.Layer):
      # Counts how many times `build` runs so the test can assert it is 1.

      def __init__(self, *args, **kwargs):  # pylint: disable=redefined-outer-name
        super(BuildCounter, self).__init__(*args, **kwargs)
        self.build_counter = 0

      def build(self, input_shape):
        self.build_counter += 1

      def call(self, inputs):
        return inputs

    with context.eager_mode():
      layer = BuildCounter(dtype=dtypes.float64)
      output_shape = layer.compute_output_shape((None, 10))
      self.assertEqual(layer.build_counter, 1)
      self.assertEqual(output_shape.as_list(), [None, 10])
      output_signature = layer.compute_output_signature(
          tensor_spec.TensorSpec(dtype=dtypes.float64, shape=[None, 10]))
      self.assertEqual(layer.build_counter, 1)
      self.assertEqual(output_signature.dtype, dtypes.float64)
      self.assertEqual(output_signature.shape.as_list(), [None, 10])
      layer(np.ones((5, 10)))
      self.assertEqual(layer.build_counter, 1)

  def test_dynamic_layer_with_deferred_sequential_model(self):
    """A deferred-build Sequential with a dynamic layer is dynamic end-to-end."""
    model = keras.Sequential(
        [DynamicLayer(dynamic=True),
         keras.layers.Dense(3)])
    self.assertEqual(model.dynamic, True)
    model.compile(rmsprop.RMSprop(0.001), loss='mse')
    self.assertEqual(model.run_eagerly, True)
    model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))

  def test_nested_dynamic_layers_in_eager_mode(self):
    """Dynamic-ness propagates through a nested functional model."""
    inputs = keras.Input((3,))
    outputs = DynamicLayer(dynamic=True)(inputs)
    inner_model = keras.Model(inputs, outputs)
    self.assertEqual(inner_model.dynamic, True)
    inputs = keras.Input((3,))
    x = DynamicLayer(dynamic=True)(inputs)
    outputs = inner_model(x)
    model = keras.Model(inputs, outputs)
    self.assertEqual(model.dynamic, True)
    model.compile(rmsprop.RMSprop(0.001), loss='mse')
    self.assertEqual(model.run_eagerly, True)
    model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))

  def test_dynamic_subclassed_model_no_shape_inference(self):
    """Without compute_output_shape, a dynamic subclassed model has no outputs."""

    class MyModel(keras.Model):
      # Data-dependent branching between two Dense layers.

      def __init__(self):
        super(MyModel, self).__init__(dynamic=True)
        self.layer1 = keras.layers.Dense(3)
        self.layer2 = keras.layers.Dense(3)

      def call(self, inputs):
        if math_ops.reduce_sum(inputs) > 0:
          return self.layer1(inputs)
        else:
          return self.layer2(inputs)

    model = MyModel()
    self.assertEqual(model.dynamic, True)
    model.compile(rmsprop.RMSprop(0.001), loss='mse')
    self.assertEqual(model.run_eagerly, True)
    model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
    self.assertEqual(model.outputs, [None])

  def test_dynamic_subclassed_model_with_shape_inference(self):
    """With compute_output_shape overridden, output shapes are inferred."""

    class MyModel(keras.Model):
      # Same branching model, but with explicit shape inference.

      def __init__(self):
        super(MyModel, self).__init__(dynamic=True)
        self.layer1 = keras.layers.Dense(3)
        self.layer2 = keras.layers.Dense(3)

      def call(self, inputs):
        if math_ops.reduce_sum(inputs) > 0:
          return self.layer1(inputs)
        else:
          return self.layer2(inputs)

      def compute_output_shape(self, input_shape):
        return tensor_shape.TensorShape(
            tuple(input_shape[:-1].as_list()) + (3,))

    model = MyModel()
    self.assertEqual(model.dynamic, True)
    model.compile(rmsprop.RMSprop(0.001), loss='mse')
    model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
    self.assertEqual(model.outputs[0].shape.as_list(), [None, 3])

  @keras_parameterized.run_all_keras_modes
  def test_add_loss_correctness(self):
    """A loss added via add_loss in call is reflected in the training loss."""

    class MyLayer(keras.layers.Layer):

      def call(self, inputs, training=None):
        self.add_loss(math_ops.reduce_sum(inputs))
        return inputs

    inputs = keras.Input((3,))
    layer = MyLayer()
    outputs = layer(inputs)
    model = keras.Model(inputs, outputs)
    self.assertEqual(len(model.losses), 1)
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    # MSE is 0 (targets equal predictions), so the batch loss is just the
    # added loss: sum over a (2, 3) tensor of ones.
    loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(loss, 2 * 3)

  @test_util.run_in_graph_and_eager_modes
  def test_invalid_forward_pass(self):
    """An exception raised inside call propagates to the layer's caller."""
    inputs = keras.Input((3,))
    with self.assertRaisesRegexp(ValueError, 'You did something wrong!'):
      _ = InvalidLayer()(inputs)

  def test_no_legacy_model(self):
    """Functional models reject legacy tf.layers layers with a clear error."""
    inputs = keras.Input((1,))
    legacy_dense_0 = legacy_core.Dense(1, name='legacy_dense_0')
    legacy_dense_1 = legacy_core.Dense(1, name='legacy_dense_1')
    layer = legacy_dense_0(inputs)
    layer = keras.layers.Dense(1)(layer)
    layer = legacy_dense_1(layer)
    expected_regex = (r'The following are legacy tf\.layers\.Layers:\n  '
                      '{}\n  {}'.format(legacy_dense_0, legacy_dense_1))
    with self.assertRaisesRegexp(TypeError, expected_regex):
      _ = keras.models.Model(inputs=[inputs], outputs=[layer])
    model = keras.models.Model(inputs=[inputs], outputs=[inputs])
    with self.assertRaisesRegexp(TypeError, expected_regex):
      model._insert_layers([legacy_dense_0, legacy_dense_1])

  def test_no_legacy_sequential(self):
    """Sequential models reject legacy tf.layers layers at every entry point."""
    layers = [
        keras.layers.Dense(1),
        legacy_core.Dense(1, name='legacy_dense_0')
    ]
    expected_regex = r'legacy tf\.layers\.Layers:\n  {}'.format(layers[1])
    with self.assertRaisesRegexp(TypeError, expected_regex):
      _ = keras.models.Sequential(layers)
    with self.assertRaisesRegexp(TypeError, expected_regex):
      _ = keras.models.Sequential([keras.layers.Input(shape=(4,))] + layers)
    model = keras.models.Sequential()
    with self.assertRaisesRegexp(TypeError, expected_regex):
      for l in layers:
        model.add(l)

  @keras_parameterized.run_with_all_model_types
  @test_util.run_in_graph_and_eager_modes
  def test_build_with_numpy_data(self):
    """Calling a model on a numpy array builds it."""
    model_layers = [
        keras.layers.Dense(3, activation='relu', kernel_initializer='ones'),
        keras.layers.Dense(1, activation='sigmoid', kernel_initializer='ones')
    ]
    model = testing_utils.get_model_from_layers(model_layers, input_shape=(4,))
    model(np.zeros((2, 4), dtype='float32'))
    self.assertTrue(model.built)

  @test_util.run_in_graph_and_eager_modes
  def test_default_add_weight(self):
    """add_weight defaults: scalar shape, float32 dtype, regularizer losses."""

    class TestLayer(keras.layers.Layer):

      def __init__(self):
        super(TestLayer, self).__init__()
        self.default_weight = self.add_weight()
        self.weight_without_name = self.add_weight(shape=(3, 4))
        self.regularized_weight_without_name = self.add_weight(
            shape=(3, 4), regularizer='l2')

    layer = TestLayer()
    self.assertEqual(layer.default_weight.shape.as_list(), [])
    self.assertEqual(layer.weight_without_name.shape.as_list(), [3, 4])
    self.assertEqual(layer.default_weight.dtype.name, 'float32')
    self.assertEqual(layer.weight_without_name.dtype.name, 'float32')
    self.assertEqual(len(layer.losses), 1)
    if not context.executing_eagerly():
      # Cannot access tensor.name in eager execution.
      self.assertTrue('Variable_2/Regularizer' in layer.losses[0].name)

  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_learning_phase_freezing_for_layers(self):
    """Direct model calls observe the global learning phase (scope/setter)."""

    class LearningPhaseLayer(keras.layers.Layer):
      # Emits ones in training mode, zeros otherwise.

      def call(self, inputs):
        return keras.backend.in_train_phase(
            lambda: array_ops.ones_like(inputs),
            lambda: array_ops.zeros_like(inputs))

    def get_learning_phase_value():
      model = keras.models.Sequential([LearningPhaseLayer(input_shape=(1,))])
      model._run_eagerly = testing_utils.should_run_eagerly()
      model._experimental_run_tf_function = (
          testing_utils.should_run_tf_function())
      return np.sum(model(np.ones((1, 1))))

    self.assertEqual(get_learning_phase_value(), 0)
    # Test scope.
    with keras.backend.learning_phase_scope(1):
      self.assertEqual(get_learning_phase_value(), 1)
    # The effects of the scope end after exiting it.
    self.assertEqual(get_learning_phase_value(), 0)
    # Test setting.
    keras.backend.set_learning_phase(1)
    self.assertEqual(get_learning_phase_value(), 1)
    keras.backend.set_learning_phase(0)
    self.assertEqual(get_learning_phase_value(), 0)

  @keras_parameterized.run_all_keras_modes
  def test_learning_phase_freezing_for_layers_in_predict(self):
    """predict() forces inference mode regardless of the global phase."""
    if not (testing_utils.should_run_eagerly() or
            testing_utils.should_run_tf_function()):
      self.skipTest('Predict fails to override the outer learning phase in'
                    'the FuncGraph path.')

    class LearningPhaseLayer(keras.layers.Layer):
      # Emits ones in training mode, zeros otherwise.

      def call(self, inputs):
        return keras.backend.in_train_phase(
            lambda: array_ops.ones_like(inputs),
            lambda: array_ops.zeros_like(inputs))

    def get_learning_phase_value():
      model = keras.models.Sequential([LearningPhaseLayer(input_shape=(1,))])
      model._run_eagerly = testing_utils.should_run_eagerly()
      model._experimental_run_tf_function = (
          testing_utils.should_run_tf_function())
      return np.sum(model.predict(np.ones((1, 1))))

    self.assertEqual(get_learning_phase_value(), 0)
    # Test scope.
    with keras.backend.learning_phase_scope(1):
      self.assertEqual(get_learning_phase_value(), 0)
    # The effects of the scope end after exiting it.
    self.assertEqual(get_learning_phase_value(), 0)
    # Test setting.
    keras.backend.set_learning_phase(1)
    self.assertEqual(get_learning_phase_value(), 0)
    keras.backend.set_learning_phase(0)
    self.assertEqual(get_learning_phase_value(), 0)

  # Cannot be enabled with `run_eagerly=True`, see b/123904578
  @test_util.run_all_in_graph_and_eager_modes
  def test_layer_can_return_variable(self):
    """A layer may return a tf.Variable from call."""

    class ComputeSum(keras.layers.Layer):
      # Accumulates the sum of all inputs seen so far in a variable.

      def __init__(self):
        super(ComputeSum, self).__init__()
        self.total = variables.Variable(
            initial_value=array_ops.zeros((1, 1)), trainable=False)
        if not context.executing_eagerly():
          keras.backend.get_session().run(self.total.initializer)

      def call(self, inputs):
        self.total.assign_add(inputs)
        return self.total

    inputs = keras.Input(shape=(1,))
    model = keras.Model(inputs, ComputeSum()(inputs))
    model.predict(np.ones((1, 1)))

  def _get_layer_with_training_arg(self):
    """Helper: returns a fresh layer whose defunned call takes `training`."""

    class TrainingLayer(keras.layers.Layer):
      """A layer with a `training` argument in a defuned `call`."""

      @def_function.function
      def call(self, inputs, training=None):
        if training is None:
          training = keras.backend.learning_phase()
        return tf_utils.smart_cond(training,
                                   lambda: array_ops.ones_like(inputs),
                                   lambda: array_ops.zeros_like(inputs))

    return TrainingLayer()

  @keras_parameterized.run_with_all_model_types
  # b/124459427: can't test with `run_eagerly=True` for now.
  @test_util.run_in_graph_and_eager_modes
  def test_training_arg_in_defun(self):
    """fit/evaluate inject `training`; an explicit value takes precedence."""
    layer = self._get_layer_with_training_arg()
    model = testing_utils.get_model_from_layers([layer], input_shape=(1,))
    model.compile(rmsprop.RMSprop(0.),
                  loss='mae')
    history = model.fit(np.zeros((1, 1)), np.zeros((1, 1)))
    self.assertEqual(history.history['loss'][0], 1.)
    loss = model.evaluate(np.zeros((1, 1)), np.zeros((1, 1)))
    self.assertEqual(loss, 0.)
    # Test that the argument injection performed in `call` is not active
    # when the argument is passed explicitly.
    layer = self._get_layer_with_training_arg()
    inputs = keras.Input(shape=(1,))
    # Pass `training` by name
    outputs = layer(inputs, training=False)
    model = keras.Model(inputs, outputs)
    model.compile(rmsprop.RMSprop(0.),
                  loss='mae')
    history = model.fit(np.zeros((1, 1)), np.zeros((1, 1)))
    self.assertEqual(history.history['loss'][0], 0.)

  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_raw_variable_assignment(self):
    """Variables assigned in nested structures are tracked and initialized."""

    class RawVariableLayer(keras.layers.Layer):

      def __init__(self, **kwargs):
        super(RawVariableLayer, self).__init__(**kwargs)
        # Test variables in nested structure.
        self.var_list = [variables.Variable(1.), {'a': variables.Variable(2.)}]

      def call(self, inputs):
        return inputs * self.var_list[0] * self.var_list[1]['a']

    model = testing_utils.get_model_from_layers([RawVariableLayer()],
                                                input_shape=(10,))
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    x, y = np.ones((10, 10)), np.ones((10, 10))
    # Checks that variables get initialized.
    model.fit(x, y, batch_size=2, epochs=2)

  @test_util.run_in_graph_and_eager_modes
  def test_layer_names(self):
    """Op-wrapping layers and explicit Add layers get non-colliding names."""
    inputs = keras.layers.Input(shape=[2])
    add1 = inputs + inputs
    add2 = keras.layers.Add()([inputs, inputs])
    add3 = inputs + inputs
    add4 = keras.layers.Add()([inputs, inputs])
    model = keras.models.Model(
        inputs=[inputs], outputs=[add1, add2, add3, add4])
    self.assertEqual(
        [l.name for l in model.layers],
        ['input_1', 'tf_op_layer_add', 'add', 'tf_op_layer_add_2', 'add_1'])

  def test_add_trainable_weight_on_frozen_layer(self):
    """A weight added while the layer is frozen is trainable after unfreezing."""

    class TestLayer(keras.layers.Layer):

      def build(self, input_shape):
        self.w = self.add_weight(shape=(), trainable=True)

      def call(self, inputs):
        return self.w * inputs

    layer = TestLayer()
    layer.trainable = False
    layer.build(None)
    layer.trainable = True
    self.assertListEqual(layer.trainable_weights, [layer.w])

  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_passing_initial_weights_values(self):
    """The `weights` constructor argument seeds the layer's kernel."""
    kernel_value = np.random.random((10, 2))
    layer_with_weights = keras.layers.Dense(
        2, use_bias=False, weights=[kernel_value])
    model = testing_utils.get_model_from_layers([layer_with_weights],
                                                input_shape=(10,))
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    inputs = np.random.random((3, 10))
    out = model.predict(inputs)
    self.assertAllClose(model.layers[-1].get_weights()[0], kernel_value)
    self.assertAllClose(out, np.dot(inputs, kernel_value))

  @test_util.run_in_graph_and_eager_modes
  def test_set_weights_and_get_weights(self):
    """set_weights round-trips and validates count and shapes."""
    layer = keras.layers.Dense(2)
    layer.build((None, 10))
    kernel = np.random.random((10, 2))
    bias = np.random.random((2,))
    layer.set_weights([kernel, bias])
    weights = layer.get_weights()
    self.assertEqual(len(weights), 2)
    self.assertAllClose(weights[0], kernel)
    self.assertAllClose(weights[1], bias)
    with self.assertRaisesRegexp(
        ValueError, 'but the layer was expecting 2 weights'):
      layer.set_weights([1, 2, 3])
    with self.assertRaisesRegexp(
        ValueError, 'not compatible with provided weight shape'):
      layer.set_weights([kernel.T, bias])

  def test_get_config_error(self):
    """get_config raises for unhandled __init__ kwargs unless overridden."""

    class MyLayer(keras.layers.Layer):

      def __init__(self, my_kwarg='default', **kwargs):
        super(MyLayer, self).__init__(**kwargs)
        self.my_kwarg = my_kwarg

    # `__init__` includes kwargs but `get_config` is not overridden, so
    # an error should be thrown:
    with self.assertRaises(NotImplementedError):
      MyLayer('custom').get_config()

    class MyLayerNew(keras.layers.Layer):

      def __init__(self, my_kwarg='default', **kwargs):
        super(MyLayerNew, self).__init__(**kwargs)
        self.my_kwarg = my_kwarg

      def get_config(self):
        config = super(MyLayerNew, self).get_config()
        config['my_kwarg'] = self.my_kwarg
        return config

    # Test to make sure that error is not raised if the method call is
    # from an overridden `get_config`:
    self.assertEqual(MyLayerNew('custom').get_config()['my_kwarg'], 'custom')

    class MyLayerNew2(keras.layers.Layer):

      def __init__(self, name='MyLayerName', dtype=None, **kwargs):  # pylint:disable=redefined-outer-name
        super(MyLayerNew2, self).__init__(name=name, dtype=dtype, **kwargs)

    # Check that if the kwargs in `__init__` are base layer constructor
    # arguments, no error is thrown:
    self.assertEqual(MyLayerNew2(name='New').get_config()['name'], 'New')
class SymbolicSupportTest(test.TestCase):
  """Checks interop between Keras symbolic tensors and raw TF ops."""

  def test_using_symbolic_tensors_with_tf_ops(self):
    """Raw TF ops applied to Keras Inputs land in the Keras graph."""
    # Single-input.
    x = keras.Input((3,))
    y = math_ops.square(x)
    self.assertEqual(y.graph, keras.backend.get_graph())

    # Multi-inputs.
    x1, x2 = keras.Input((3,)), keras.Input((3,))
    y = array_ops.concat([x1, x2], axis=1)
    self.assertEqual(y.graph, keras.backend.get_graph())

    # Mixing Keras symbolic tensors and graph tensors from the same graph works.
    with keras.backend.get_graph().as_default():
      x1 = keras.Input((3,))
    x2 = keras.Input((3,))
    y = math_ops.matmul(x1, x2)
    self.assertEqual(y.graph, keras.backend.get_graph())

    # Creating same op type (matmul) multiple times in the Keras graph works.
    x1 = keras.Input((3,))
    x2 = keras.Input((3,))
    y = math_ops.matmul(x1, x2)
    self.assertEqual(y.graph, keras.backend.get_graph())

  def test_mixing_eager_and_graph_tensors(self):
    """Combining a graph tensor with an eager tensor raises TypeError."""
    with ops.Graph().as_default():
      x1 = array_ops.ones((3, 3))
    x2 = array_ops.ones((3, 3))
    self.assertIsInstance(x2, ops.EagerTensor)
    with self.assertRaisesRegexp(TypeError, 'Graph tensors'):
      math_ops.matmul(x1, x2)

  def test_mixing_numpy_arrays_and_graph_tensors(self):
    """Combining a graph tensor with a NumPy array raises TypeError."""
    with ops.Graph().as_default():
      x1 = array_ops.ones((3, 3))
    x2 = np.ones((3, 3), dtype='float32')
    with self.assertRaisesRegexp(TypeError, 'Graph tensors'):
      math_ops.matmul(x1, x2)

  @test_util.run_in_graph_and_eager_modes
  def test_mixing_keras_symbolic_tensors_and_eager_tensors(self):
    """An eager tensor mixed with a Keras Input acts as a constant."""
    x1 = keras.Input((3,))
    x2 = array_ops.ones((3, 3))
    y = math_ops.matmul(x1, x2)
    self.assertEqual(y.graph, keras.backend.get_graph())
    fn = keras.backend.function(inputs=[x1], outputs=[y])
    x_val = np.random.random((3, 3))
    y_val = np.ones((3, 3))
    self.assertAllClose(fn([x_val])[0],
                        np.matmul(x_val, y_val),
                        atol=1e-5)

  @test_util.run_in_graph_and_eager_modes
  def test_mixing_keras_symbolic_tensors_and_numpy_arrays(self):
    """A NumPy array mixed with a Keras Input acts as a constant."""
    x1 = keras.Input((3,))
    x2 = np.ones((3, 3), dtype='float32')
    y = math_ops.matmul(x1, x2)
    self.assertEqual(y.graph, keras.backend.get_graph())
    fn = keras.backend.function(inputs=[x1], outputs=[y])
    x_val = np.random.random((3, 3))
    y_val = np.ones((3, 3))
    self.assertAllClose(fn([x_val])[0],
                        np.matmul(x_val, y_val),
                        atol=1e-5)

  @test_util.run_in_graph_and_eager_modes
  def test_reraising_exception(self):
    # When layer is not dynamic, we have some pattern matching during exception
    # handling to detect when the user is trying to use python control flow.
    # When an exception is thrown but the pattern doesn't match, we want to
    # preserve the originating stack trace. An early implementation of this
    # logic lost the stack trace. We test the correct behavior here.

    class TypeErrorLayer(base_layer.Layer):

      def call(self, inputs):

        def easily_identifiable_name():
          raise TypeError('Non-matching TypeError message.')

        easily_identifiable_name()

    inputs = keras.Input((3,))

    try:
      _ = TypeErrorLayer()(inputs)
    except TypeError as e:
      if hasattr(e, 'ag_error_metadata'):
        self.assertIn('easily_identifiable_name', str(e))
        # See ErrorMetadataBase in autograph/pyct/errors.py
        # Topmost frame corresponds to `call` itself.
        function_name = e.ag_error_metadata.translated_stack[-2].function_name
      else:
        tb = traceback.extract_tb(sys.exc_info()[2])
        last_entry = tb[-1]
        function_name = last_entry[2]
      self.assertEqual(function_name, 'easily_identifiable_name')

  @test_util.run_in_graph_and_eager_modes
  def test_summaries_in_tf_function(self):
    """Summaries emitted inside `call` are recorded to the active writer."""
    if not context.executing_eagerly():
      return

    class MyLayer(keras.layers.Layer):

      def call(self, inputs):
        summary_ops_v2.scalar('mean', math_ops.reduce_mean(inputs))
        return inputs

    tmp_dir = self.get_temp_dir()
    writer = summary_ops_v2.create_file_writer_v2(tmp_dir)
    with writer.as_default(), summary_ops_v2.always_record_summaries():
      my_layer = MyLayer()
      x = array_ops.ones((10, 10))

      def my_fn(x):
        return my_layer(x)

      _ = my_fn(x)

    # The single written event file must contain exactly the layer's tag.
    event_file = gfile.Glob(os.path.join(tmp_dir, 'events*'))
    self.assertLen(event_file, 1)
    event_file = event_file[0]
    tags = set()
    for e in summary_iterator.summary_iterator(event_file):
      for val in e.summary.value:
        tags.add(val.tag)
    self.assertEqual(set(['my_layer/mean']), tags)
@test_util.run_all_in_graph_and_eager_modes
class NestedTrackingTest(test.TestCase):
  """Checks tracking of variables/updates/losses through nested layers."""

  def test_nested_layer_variable_tracking(self):
    # Test that variables from nested sublayers are
    # being tracked by subclassed layers.

    class MyLayer(keras.layers.Layer):

      def __init__(self):
        super(MyLayer, self).__init__()
        self.dense1 = keras.layers.Dense(1)
        self.dense2 = keras.layers.BatchNormalization()

      def build(self, input_shape):
        self.v1 = self.add_weight('v1', shape=input_shape[1:].as_list())
        self.v2 = variables.Variable(
            name='v2',
            initial_value=np.zeros(input_shape[1:].as_list(), dtype='float32'),
            trainable=False)

      def call(self, inputs):
        x = self.dense1(inputs) + self.dense2(inputs)
        return x + self.v1 + self.v2

    layer = MyLayer()
    inputs = keras.Input((1,))
    _ = layer(inputs)

    self.assertEqual(len(layer.weights), 8)
    self.assertEqual(len(layer.trainable_weights), 5)
    self.assertEqual(len(layer.non_trainable_weights), 3)

    # Freezing a sublayer moves its trainable weights to non-trainable.
    layer.dense1.trainable = False
    self.assertEqual(len(layer.weights), 8)
    self.assertEqual(len(layer.trainable_weights), 3)
    self.assertEqual(len(layer.non_trainable_weights), 5)

    # Freezing the outer layer freezes everything.
    layer.trainable = False
    self.assertEqual(len(layer.weights), 8)
    self.assertEqual(len(layer.trainable_weights), 0)
    self.assertEqual(len(layer.non_trainable_weights), 8)
    self.assertEqual(
        set([layer.dense1, layer.dense2, layer.v1, layer.v2]),
        set([obj for unused_name, obj in layer._checkpoint_dependencies]))

  def test_nested_layer_updates_losses_tracking(self):
    # Test that updates and losses from nested sublayers are
    # being tracked by subclassed layers.

    class UpdateAndLossLayer(keras.layers.Layer):

      def build(self, _):
        self.v1 = self.add_weight('v1', shape=())

      def call(self, inputs):
        self.add_loss(math_ops.reduce_sum(inputs))
        self.add_update(state_ops.assign_add(self.v1, 1))
        return inputs + 1

    class MyLayer(keras.layers.Layer):

      def build(self, _):
        self.v1 = self.add_weight('v1', shape=())

      def __init__(self):
        super(MyLayer, self).__init__()
        self.ul1 = UpdateAndLossLayer()
        self.ul2 = UpdateAndLossLayer()

      def call(self, inputs):
        self.add_loss(math_ops.reduce_sum(inputs))
        self.add_update(state_ops.assign_add(self.v1, 1))
        x = self.ul1(inputs)
        return self.ul2(x)

    layer = MyLayer()

    # One loss/update from the outer layer plus one from each sublayer.
    if context.executing_eagerly():
      inputs = array_ops.ones((3, 1))
      _ = layer(inputs)
      self.assertEqual(len(layer.losses), 3)
      self.assertLen(layer.get_losses_for(None), 3)
    else:
      inputs = keras.Input((1,))
      _ = layer(inputs)
      self.assertEqual(len(layer.losses), 3)
      self.assertEqual(len(layer.updates), 3)
      self.assertLen(layer.get_losses_for(None), 3)

  def test_attribute_reassignment(self):
    """Only the final attribute assignment is tracked; deletion untracks."""
    l = keras.layers.Layer()
    l.a = keras.layers.Layer()
    l.a = []
    l.a = variables.Variable(1.)
    l.a = keras.layers.Layer()
    last_assignment = keras.layers.Layer()
    l.a = last_assignment

    l.b = variables.Variable(1.)
    del l.b
    l.c = keras.layers.Layer()
    del l.c
    l.d = last_assignment
    del l.d
    self.assertEqual([last_assignment], l._layers)
    self.assertEqual([], l.trainable_weights)
    self.assertEqual([], l.non_trainable_weights)
    self.assertEqual([], l.weights)
    del l.a
    self.assertEqual([], l._layers)

  def test_assign_op_not_tracked_as_variable(self):
    """The tensor returned by `assign_add` is not tracked as a variable."""

    class LayerWithAssignAttr(keras.layers.Layer):

      def build(self, input_shape):
        self.v = variables.Variable(1.)
        self.v_assign = self.v.assign_add(2.)

    layer = LayerWithAssignAttr()
    layer.build((10, 10))

    self.assertEqual([layer.v], layer.variables)

  def test_layer_class_not_tracked_as_sublayer(self):
    # See https://github.com/tensorflow/tensorflow/issues/27431 for details.

    class LayerWithClassAttribute(keras.layers.Layer):

      def __init__(self):
        super(LayerWithClassAttribute, self).__init__()
        self.layer_fn = keras.layers.Dense

    layer = LayerWithClassAttribute()
    self.assertEmpty(layer.variables)
    self.assertEmpty(layer.submodules)

  def test_layer_call_fn_args(self):
    """`_call_fn_args` reflects the declared `call` signature, defun or not."""

    class NonDefunLayer(keras.layers.Layer):

      def call(self, inputs, a, mask, b=None, training=None):
        return inputs

    class DefunLayer(keras.layers.Layer):

      @def_function.function
      def call(self, x, mask, a, training=None, b=None):
        return x

    nondefun_layer = NonDefunLayer()
    self.assertEqual(nondefun_layer._call_fn_args,
                     ['inputs', 'a', 'mask', 'b', 'training'])
    defun_layer = DefunLayer()
    self.assertEqual(defun_layer._call_fn_args,
                     ['x', 'mask', 'a', 'training', 'b'])
@test_util.run_all_in_graph_and_eager_modes
class NameScopingTest(keras_parameterized.TestCase):
  """Checks that layer weights are created under the layer's name scope."""

  def test_name_scope_layer(self):
    x = keras.backend.placeholder(shape=(10, 10))
    layer = keras.layers.Dense(10, name='MyName')
    layer(x)
    self.assertEqual(layer.bias.name, 'MyName/bias:0')
    self.assertEqual(layer.kernel.name, 'MyName/kernel:0')

  def test_name_scope_sublayer(self):

    class NameScopeTracker(keras.layers.Layer):

      def call(self, inputs):
        # Record the scope that was active while this layer ran.
        self.active_name_scope = ops.get_name_scope()
        return inputs

    x = keras.backend.placeholder(shape=(10, 10))
    sublayer = NameScopeTracker(name='Sublayer')
    layer = keras.layers.Dense(10, activation=sublayer, name='MyName2')
    layer(x)
    self.assertEqual(layer.bias.name, 'MyName2/bias:0')
    self.assertEqual(layer.kernel.name, 'MyName2/kernel:0')
    # A sublayer used as an activation runs nested inside the outer scope.
    self.assertEqual(sublayer.active_name_scope, 'MyName2/Sublayer')

  def test_name_scope_tf_tensor(self):
    x = ops.convert_to_tensor(np.ones((10, 10)))
    layer = keras.layers.Dense(
        10, activation=keras.layers.ReLU(name='MyAct'), name='MyName3')
    layer(x)
    self.assertEqual(layer.bias.name, 'MyName3/bias:0')
    self.assertEqual(layer.kernel.name, 'MyName3/kernel:0')
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class AutographControlFlowTest(keras_parameterized.TestCase):
  """Tests autograph conversion of Python control flow in `Layer.call`.

  NOTE(review): the literal `if training:` / `if`/`else` shapes inside the
  `call` bodies below are the fixtures under test; do not restructure them.
  """

  def test_disabling_in_context_is_matched(self):

    test_obj = self

    class MyLayer(keras.layers.Layer):

      def call(self, inputs, training=None):
        # With autograph disabled, using a Tensor as a Python bool must fail.
        with test_obj.assertRaisesRegex(TypeError, 'Tensor.*as.*bool'):
          if constant_op.constant(False):
            return inputs * 1.
        return inputs * 0.

    @def_function.function(autograph=False)
    def test_fn():
      return MyLayer()(constant_op.constant([[1., 2., 3.]]))

    test_fn()

  def test_if_training_pattern_output(self):
    """Training-conditional outputs route correctly in train/test phases."""

    class MyLayer(keras.layers.Layer):

      def call(self, inputs, training=None):
        if training:
          return inputs * 1.
        return inputs * 0.

    inputs = keras.Input((3,))
    outputs = MyLayer()(inputs)
    model = keras.Model(inputs, outputs)
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    train_loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(train_loss, 0.)
    test_loss = model.test_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(test_loss, 1.)

  def test_if_training_pattern_loss(self):
    """A loss added only under `if training:` is active only in training."""

    class MyLayer(keras.layers.Layer):

      def call(self, inputs, training=None):
        if training:
          loss = math_ops.reduce_sum(inputs)
        else:
          loss = 0.
        self.add_loss(loss)
        return inputs

    inputs = keras.Input((3,))
    outputs = MyLayer()(inputs)
    model = keras.Model(inputs, outputs)
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    train_loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(train_loss, 2 * 3)
    test_loss = model.test_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(test_loss, 0)

  def test_if_training_pattern_metric(self):
    """A metric added only under `if training:` is 0 at test time."""

    class MyLayer(keras.layers.Layer):

      def call(self, inputs, training=None):
        if training:
          metric = math_ops.reduce_sum(inputs)
        else:
          metric = 0.
        self.add_metric(metric, name='my_metric', aggregation='mean')
        return inputs

    inputs = keras.Input((3,))
    outputs = MyLayer()(inputs)
    model = keras.Model(inputs, outputs)
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    _, train_metric = model.train_on_batch(np.ones((2, 3)),
                                           np.ones((2, 3)))
    self.assertEqual(train_metric, 2 * 3)
    _, test_metric = model.test_on_batch(np.ones((2, 3)),
                                         np.ones((2, 3)))
    self.assertEqual(test_metric, 0)

  def test_if_training_pattern_update(self):
    """A variable update gated on `training` only fires during training."""

    class MyLayer(keras.layers.Layer):

      def build(self, input_shape):
        self.counter = self.add_weight(
            shape=(), trainable=False, initializer='zeros')

      def call(self, inputs, training=None):
        if training:
          increment = 1.
        else:
          increment = 0.
        self.counter.assign_add(increment)
        return inputs

    inputs = keras.Input((3,))
    layer = MyLayer()
    outputs = layer(inputs)
    model = keras.Model(inputs, outputs)
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(keras.backend.get_value(layer.counter), 1.)

  def test_conditional_updates_in_call(self):
    """`add_update` inside a control-flow branch requires a dynamic layer."""

    class MyLayer(keras.layers.Layer):

      def __init__(self):
        super(MyLayer,
              self).__init__(dynamic=testing_utils.should_run_eagerly())

      def build(self, input_shape):
        self.counter = self.add_weight(
            shape=(), trainable=False, initializer='zeros')

      def call(self, inputs, training=None):
        if training:
          z = math_ops.reduce_sum(inputs)
          self.add_update(lambda: self.counter.assign_add(z))
        return inputs

      def compute_output_shape(self, input_shape):
        return input_shape

    if testing_utils.should_run_eagerly():
      inputs = keras.Input((3,))
      layer = MyLayer()
      outputs = layer(inputs)
      model = keras.Model(inputs, outputs)
      model.compile(
          'sgd',
          'mse',
          run_eagerly=testing_utils.should_run_eagerly(),
          experimental_run_tf_function=testing_utils.should_run_tf_function())
      model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
      self.assertEqual(keras.backend.get_value(layer.counter), 6.)
    else:
      # TODO(fchollet): support the same workflow in graph mode.
      with self.assertRaisesRegexp(RuntimeError,
                                   '`add_update` in a control flow branch'):
        layer = MyLayer()
        layer(keras.Input((3,)))
        _ = layer.updates

  def test_conditional_losses_in_call(self):
    """`add_loss` inside a control-flow branch requires a dynamic layer."""

    class MyLayer(keras.layers.Layer):

      def __init__(self):
        super(MyLayer,
              self).__init__(dynamic=testing_utils.should_run_eagerly())

      def call(self, inputs, training=None):
        if training:
          self.add_loss(math_ops.reduce_sum(inputs))
        return inputs

      def compute_output_shape(self, input_shape):
        return input_shape

    if testing_utils.should_run_eagerly():
      inputs = keras.Input((3,))
      layer = MyLayer()
      outputs = layer(inputs)
      model = keras.Model(inputs, outputs)
      model.compile(
          'sgd',
          'mse',
          run_eagerly=testing_utils.should_run_eagerly(),
          experimental_run_tf_function=testing_utils.should_run_tf_function())
      loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
      self.assertEqual(loss, 2 * 3)
    else:
      with self.assertRaisesRegexp(RuntimeError,
                                   '`add_loss` in a control flow branch'):
        layer = MyLayer()(keras.Input((3,)))

  def test_conditional_callable_losses(self):
    """Callable (regularizer) losses work under tf.function control flow."""
    model = keras.Sequential([
        keras.layers.Dense(
            1, kernel_regularizer=keras.regularizers.l2(1e-4), input_shape=(1,))
    ])
    model._run_eagerly = testing_utils.should_run_eagerly()
    model._experimental_run_tf_function = testing_utils.should_run_tf_function()

    def assert_graph(t):
      if not context.executing_eagerly():
        self.assertEqual(t.graph, ops.get_default_graph())

    @def_function.function
    def get_losses(t):
      if t < 0:
        return math_ops.reduce_sum(model.losses) * t
      else:
        return math_ops.reduce_sum(model.losses)

    assert_graph(get_losses(constant_op.constant(2.)))
    assert_graph(get_losses(constant_op.constant(0.5)))

  def test_conditional_metrics_in_call(self):
    """`add_metric` inside a control-flow branch requires a dynamic layer."""

    class MyLayer(keras.layers.Layer):

      def __init__(self):
        super(MyLayer,
              self).__init__(dynamic=testing_utils.should_run_eagerly())

      def call(self, inputs, training=None):
        if training:
          self.add_metric(math_ops.reduce_sum(inputs),
                          name='sum',
                          aggregation='mean')
        return inputs

      def compute_output_shape(self, input_shape):
        return input_shape

    if testing_utils.should_run_eagerly():
      inputs = keras.Input((3,))
      layer = MyLayer()
      outputs = layer(inputs)
      model = keras.Model(inputs, outputs)
      model.compile(
          'sgd',
          'mse',
          run_eagerly=testing_utils.should_run_eagerly(),
          experimental_run_tf_function=testing_utils.should_run_tf_function())
      history = model.fit(np.ones((2, 3)), np.ones((2, 3)))
      self.assertEqual(history.history['sum'][-1], 2 * 3)
    else:
      # TODO(fchollet): support the same workflow in graph mode.
      with self.assertRaisesRegexp(RuntimeError,
                                   '`add_metric` in a control flow branch'):
        layer = MyLayer()(keras.Input((3,)))

  def test_conditional_activity_regularizer_in_call(self):
    """An activity regularizer under control flow requires a dynamic model."""

    class TestModel(keras.Model):

      def __init__(self):
        super(TestModel, self).__init__(
            name='test_model', dynamic=testing_utils.should_run_eagerly())
        self.layer = keras.layers.Dense(2, activity_regularizer='l2')

      def call(self, x, training=None):
        if math_ops.greater(math_ops.reduce_sum(x), 0.0):
          return self.layer(x)
        else:
          return self.layer(x)

    model = TestModel()
    model.compile(
        loss='mse',
        optimizer='sgd',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())

    x = np.ones(shape=(10, 1))
    y = np.ones(shape=(10, 2))

    if testing_utils.should_run_eagerly():
      model.fit(x, y, epochs=2, batch_size=5)
    else:
      with self.assertRaisesRegexp(
          RuntimeError, '`activity_regularizer` in a control flow branch'):
        model.fit(x, y, epochs=2, batch_size=5)

  def test_conditional_activity_regularizer_with_wrappers_in_call(self):
    """Same as above, but with the regularized layer inside a wrapper."""

    class TestModel(keras.Model):

      def __init__(self):
        super(TestModel, self).__init__(
            name='test_model', dynamic=testing_utils.should_run_eagerly())
        self.layer = keras.layers.TimeDistributed(
            keras.layers.Dense(2, activity_regularizer='l2'),
            input_shape=(3, 4))

      def call(self, x, training=None):
        if math_ops.greater(math_ops.reduce_sum(x), 0.0):
          return self.layer(x)
        else:
          return self.layer(x)

    model = TestModel()
    model.compile(
        loss='mse',
        optimizer='sgd',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())

    x = np.ones(shape=(10, 3, 4))
    y = np.ones(shape=(10, 3, 2))

    if testing_utils.should_run_eagerly():
      model.fit(x, y, epochs=2, batch_size=5)
    else:
      with self.assertRaisesRegexp(
          RuntimeError, '`activity_regularizer` in a control flow branch'):
        model.fit(x, y, epochs=2, batch_size=5)
class AddLayer(keras.layers.Layer):
  """A layer which adds its input to a variable.

  Useful for testing a layer with a variable
  """

  def build(self, _):
    # Single scalar weight, initialized to one.
    self.v = self.add_weight('v', (), initializer='ones')
    self.built = True

  def call(self, inputs):
    return inputs + self.v
class IdentityLayer(keras.layers.Layer):
  """A layer that returns its input.

  Useful for testing a layer without a variable.
  """

  def call(self, inputs):
    return inputs
@test_util.run_all_in_graph_and_eager_modes
class DTypeTest(keras_parameterized.TestCase):
  # This class only has tests relating to layer.dtype. Tests for dtype policies
  # are in mixed_precision/experimental/keras_test.py

  def _const(self, dtype):
    """Returns a scalar constant tensor of the given dtype."""
    return array_ops.constant(1, dtype=dtype)

  @testing_utils.enable_v2_dtype_behavior
  def test_dtype_defaults_to_floatx(self):
    """A layer's dtype defaults to the global floatx and never changes."""
    layer = AddLayer()
    self.assertEqual(layer.dtype, 'float32')
    layer(self._const('float64'))
    self.assertEqual(layer.dtype, 'float32')  # dtype should not change

    try:
      backend.set_floatx('float64')
      layer = AddLayer()
      self.assertEqual(layer.dtype, 'float64')
    finally:
      backend.set_floatx('float32')

  @testing_utils.enable_v2_dtype_behavior
  def test_passing_dtype_to_constructor(self):
    """A dtype passed to the constructor wins over the input dtype."""
    layer = IdentityLayer(dtype='float64')
    layer(self._const('float32'))
    self.assertEqual(layer.dtype, 'float64')

    layer = IdentityLayer(dtype='int32')
    layer(self._const('float32'))
    self.assertEqual(layer.dtype, 'int32')

    # A dtype object (not just a string) must be accepted too.
    layer = IdentityLayer(dtype=dtypes.float64)
    layer(self._const('float32'))
    self.assertEqual(layer.dtype, 'float64')

  @testing_utils.enable_v2_dtype_behavior
  def input_cast_to_dtype(self):
    # NOTE(review): this method lacks the `test_` prefix, so the unittest
    # runner never discovers it — presumably it should be named
    # `test_input_cast_to_dtype`. Renaming is left to a dedicated change.
    layer = AddLayer()

    # Input should be cast to layer.dtype, so output should also be layer.dtype
    self.assertEqual(layer(self._const('float64')).dtype, 'float32')
    layer = AddLayer(dtype='float64')
    self.assertEqual(layer(self._const('float32')).dtype, 'float64')

    # Test inputs are not casted if layer.dtype is not floating-point
    layer = IdentityLayer(dtype='int32')
    self.assertEqual(layer(self._const('float64')).dtype, 'float64')

    # Test inputs are not casted if the inputs are not floating-point
    layer = IdentityLayer(dtype='float32')
    self.assertEqual(layer(self._const('int32')).dtype, 'int32')

    # Test Numpy arrays are casted
    layer = IdentityLayer(dtype='float64')
    self.assertEqual(layer(np.array(1, dtype='float32')).dtype, 'float64')

    # Test Python floats are casted
    layer = IdentityLayer(dtype='float64')
    self.assertEqual(layer(1.).dtype, 'float64')

  @testing_utils.enable_v2_dtype_behavior
  def multiple_inputs_cast_to_dtype(self):
    # NOTE(review): also missing the `test_` prefix (so it never ran) — which
    # is how the `self._constant` typo below survived; fixed to `self._const`.

    class MultiIdentityLayer(keras.layers.Layer):

      def call(self, inputs):
        return [array_ops.identity(x) for x in inputs]

    # Testing layer with default dtype of float32
    layer = MultiIdentityLayer()
    x, y = layer([self._const('float16'), self._const('float32')])
    self.assertEqual(x.dtype, 'float32')
    self.assertEqual(y.dtype, 'float32')

    # Test passing dtype to the constructor
    layer = MultiIdentityLayer(dtype='float64')
    x, y = layer([self._const('float16'), self._const('float32')])
    self.assertEqual(x.dtype, 'float64')
    self.assertEqual(y.dtype, 'float64')

    # Test several non-floating point types
    layer = MultiIdentityLayer(dtype='float64')
    # BUG FIX: was `self._constant('complex64')`, but no `_constant` method
    # exists on this class (the helper is `_const`); that call would raise
    # AttributeError as soon as this method is actually executed.
    x, y, z, w = layer([self._const('float16'), self._const('bool'),
                        self._const('float64'), self._const('complex64')])
    self.assertEqual(x.dtype, 'float64')
    self.assertEqual(y.dtype, 'bool')
    self.assertEqual(z.dtype, 'float64')
    self.assertEqual(w.dtype, 'complex64')

  @testing_utils.enable_v2_dtype_behavior
  def test_extra_args_and_kwargs_not_casted(self):
    """Only the first positional `inputs` argument is autocast."""

    class IdentityLayerWithArgs(keras.layers.Layer):

      def call(self, inputs, *args, **kwargs):
        return nest.flatten([inputs, args, kwargs])

    layer = IdentityLayerWithArgs(dtype='float64')
    x, y, z = layer(self._const('float16'), self._const('float16'),
                    kwarg=self._const('float16'))
    self.assertEqual(x.dtype, 'float64')
    self.assertEqual(y.dtype, 'float16')
    self.assertEqual(z.dtype, 'float16')

  @testing_utils.enable_v2_dtype_behavior
  def test_layer_without_autocast(self):
    """`experimental_autocast=False` disables input casting."""

    class IdentityLayerWithoutAutocast(IdentityLayer):

      def __init__(self, *args, **kwargs):
        kwargs['experimental_autocast'] = False
        super(IdentityLayerWithoutAutocast, self).__init__(*args, **kwargs)

    layer = IdentityLayerWithoutAutocast(dtype='float64')
    self.assertEqual(layer(self._const('float32')).dtype, 'float32')

  @testing_utils.enable_v2_dtype_behavior
  def test_dtype_warnings(self):
    """The cast warning fires once per layer and only for implicit dtypes."""
    # Test a layer warns when it casts inputs.
    layer = IdentityLayer()
    with test.mock.patch.object(tf_logging, 'warn') as mock_warn:
      layer(self._const('float64'))
      self.assertRegexpMatches(
          str(mock_warn.call_args),
          ".*from dtype float64 to the layer's dtype of float32.*"
          "The layer has dtype float32 because.*")

    # Test a layer does not warn a second time
    with test.mock.patch.object(tf_logging, 'warn') as mock_warn:
      layer(self._const('float64'))
      mock_warn.assert_not_called()

    # Test a new layer can warn even if a different layer already warned
    layer = IdentityLayer()
    with test.mock.patch.object(tf_logging, 'warn') as mock_warn:
      layer(self._const('float64'))
      self.assertRegexpMatches(
          str(mock_warn.call_args),
          ".*from dtype float64 to the layer's dtype of float32.*"
          "The layer has dtype float32 because.*")

    # Test a layer does not warn if a dtype is passed
    layer = IdentityLayer(dtype='float32')
    with test.mock.patch.object(tf_logging, 'warn') as mock_warn:
      layer(self._const('float64'))
      mock_warn.assert_not_called()

    # Test a layer does not warn if a Policy is set:
    with policy.policy_scope('float32'):
      layer = IdentityLayer()
      with test.mock.patch.object(tf_logging, 'warn') as mock_warn:
        layer(self._const('float64'))
        mock_warn.assert_not_called()

  @testing_utils.enable_v2_dtype_behavior
  def test_compute_output_signature(self):
    """`compute_output_signature` reports the layer's dtype, not the input's."""

    class IdentityLayerWithOutputShape(IdentityLayer):

      def compute_output_shape(self, input_shape):
        return input_shape

    layer = IdentityLayerWithOutputShape(dtype='float64')
    output_signature = layer.compute_output_signature(
        tensor_spec.TensorSpec(shape=(), dtype='float32'))
    self.assertEqual(output_signature.shape, ())
    self.assertEqual(output_signature.dtype, 'float64')

  @testing_utils.enable_v2_dtype_behavior
  def test_passing_non_tensor(self):
    layer = IdentityLayer()
    x = object()
    y = layer(x)  # Layer should not cast 'x', as it's not a tensor
    self.assertIs(x, y)

  @testing_utils.disable_v2_dtype_behavior
  def test_v1_behavior(self):
    """Under V1 behavior the dtype is inferred from the first input."""
    # Test dtype defaults to None and inferred from input
    layer = IdentityLayer()
    self.assertIsNone(layer.dtype)
    layer(self._const('float64'))
    self.assertEqual(layer.dtype, 'float64')

    # Test layer does not cast to dtype
    self.assertEqual(layer(self._const('float32')).dtype, 'float32')
# (layer class, input shape without batch dim, constructor-kwarg choices).
_LAYERS_TO_TEST = [
    (keras.layers.Dense, (1,), collections.OrderedDict(units=[1])),
    (keras.layers.Activation, (2, 2),
     collections.OrderedDict(activation=['relu'])),
    (keras.layers.Dropout, (16,), collections.OrderedDict(rate=[0.25])),
    (keras.layers.BatchNormalization, (8, 8, 3), collections.OrderedDict(
        axis=[3], center=[True, False], scale=[True, False])),
    (keras.layers.Conv1D, (8, 8), collections.OrderedDict(
        filters=[1], kernel_size=[1, 3], strides=[1, 2],
        padding=['valid', 'same'], use_bias=[True, False],
        kernel_regularizer=[None, 'l2'])),
    (keras.layers.Conv2D, (8, 8, 3), collections.OrderedDict(
        filters=[1], kernel_size=[1, 3], strides=[1, 2],
        padding=['valid', 'same'], use_bias=[True, False],
        kernel_regularizer=[None, 'l2'])),
    (keras.layers.LSTM, (8, 8), collections.OrderedDict(
        units=[1],
        activation=[None, 'relu'],
        kernel_regularizer=[None, 'l2'],
        dropout=[0, 0.5],
        stateful=[True, False],
        unroll=[True, False])),
]

# Expand every layer spec into one named test case per kwarg combination.
OUTPUT_TEST_CASES = []
for layer_cls, input_shape, kwarg_options in _LAYERS_TO_TEST:
  # One list of (arg, value) pairs per constructor argument.
  per_arg_choices = [[(arg, choice) for choice in choices]
                     for arg, choices in kwarg_options.items()]  # pylint: disable=g-complex-comprehension
  for chosen in it.product(*per_arg_choices):
    suffix = '_'.join('{}_{}'.format(arg, value) for arg, value in chosen)
    case_name = '_{}_{}'.format(layer_cls.__name__, suffix)
    OUTPUT_TEST_CASES.append(
        (case_name, layer_cls, input_shape, dict(chosen)))
class OutputTypeTest(keras_parameterized.TestCase):
  """Test that layers and models produce the correct tensor types."""

  # In v1 graph there are only symbolic tensors.
  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  @parameterized.named_parameters(*OUTPUT_TEST_CASES)
  def test_layer_outputs(self, layer_to_test, input_shape, layer_kwargs):
    """Outputs must be eager tensors iff the test runs eagerly."""
    layer = layer_to_test(**layer_kwargs)

    input_data = np.ones(shape=(2,) + input_shape, dtype=np.float32)
    layer_result = layer(input_data)

    inp = keras.layers.Input(shape=input_shape, batch_size=2)
    model = keras.models.Model(inp, layer_to_test(**layer_kwargs)(inp))
    model_result = model(input_data)

    # Both the bare layer and the model wrapping it must agree with the mode.
    for x in [layer_result, model_result]:
      if not isinstance(x, ops.Tensor):
        raise ValueError('Tensor or EagerTensor expected, got type {}'
                         .format(type(x)))

      if isinstance(x, ops.EagerTensor) != context.executing_eagerly():
        expected_type = (ops.EagerTensor if context.executing_eagerly()
                         else ops.Tensor)
        raise ValueError('Expected type {}, got type {}'
                         .format(expected_type, type(x)))
if __name__ == '__main__':
  # Eager execution is enabled up front so the eager code paths above run.
  ops.enable_eager_execution()
  test.main()
# (removed non-Python artifact left over from dataset extraction)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools as it
import os
import sys
import traceback
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.keras.optimizer_v2 import rmsprop
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.layers import core as legacy_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.util import nest
class DynamicLayer(base_layer.Layer):
  """Squares its input row by row via a Python `enumerate` loop.

  Iterating over a tensor with `enumerate` requires eager execution, which
  is what makes this layer useful for `dynamic=True` tests.
  """

  def __init__(self, dynamic=False, **kwargs):
    super(DynamicLayer, self).__init__(dynamic=dynamic, **kwargs)

  def call(self, inputs):
    squared_rows = tensor_array_ops.TensorArray(
        dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
    for row_index, row in enumerate(inputs):
      squared_rows = squared_rows.write(row_index, math_ops.square(row))
    return squared_rows.stack()

  def compute_output_shape(self, input_shape):
    # Squaring is elementwise, so the output shape equals the input shape.
    return input_shape
class InvalidLayer(base_layer.Layer):
  """A layer whose `call` always raises, for error-propagation tests."""

  def call(self, inputs):
    raise ValueError('You did something wrong!')
class BaseLayerTest(keras_parameterized.TestCase):
@keras_parameterized.run_with_all_model_types
def test_dynamic_layer(self):
model = testing_utils.get_model_from_layers([DynamicLayer(dynamic=True)],
input_shape=(3,))
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss='mse')
self.assertEqual(model.run_eagerly, True)
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
  @keras_parameterized.run_with_all_model_types
  def test_dynamic_layer_error(self):
    """Using Python control flow without dynamic=True raises TypeError."""
    with self.assertRaisesRegexp(TypeError,
                                 'attempting to use Python control flow'):
      model = testing_utils.get_model_from_layers([DynamicLayer()],
                                                  input_shape=(3,))
      model.compile(rmsprop.RMSprop(0.001), loss='mse')
      model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
  @keras_parameterized.run_with_all_model_types
  def test_dynamic_layer_error_running_in_graph_mode(self):
    """A dynamic model can be built, but not compiled, in graph mode."""
    with context.graph_mode():
      model = testing_utils.get_model_from_layers([DynamicLayer(dynamic=True)],
                                                  input_shape=(3,))
      self.assertEqual(model.dynamic, True)
      # Compilation must demand eager execution.
      with self.assertRaisesRegexp(
          ValueError, 'You must enable eager execution'):
        model.compile(rmsprop.RMSprop(0.001), loss='mse')
  def test_manual_compute_output_shape(self):
    """compute_output_shape/signature build the layer at most once."""

    class BuildCounter(keras.layers.Layer):

      def __init__(self, *args, **kwargs):  # pylint: disable=redefined-outer-name
        super(BuildCounter, self).__init__(*args, **kwargs)
        # Counts how many times `build` is invoked.
        self.build_counter = 0

      def build(self, input_shape):
        self.build_counter += 1

      def call(self, inputs):
        return inputs

    with context.eager_mode():
      layer = BuildCounter(dtype=dtypes.float64)
      output_shape = layer.compute_output_shape((None, 10))
      self.assertEqual(layer.build_counter, 1)
      self.assertEqual(output_shape.as_list(), [None, 10])
      output_signature = layer.compute_output_signature(
          tensor_spec.TensorSpec(dtype=dtypes.float64, shape=[None, 10]))
      self.assertEqual(layer.build_counter, 1)
      self.assertEqual(output_signature.dtype, dtypes.float64)
      self.assertEqual(output_signature.shape.as_list(), [None, 10])
      # Calling the layer must not trigger a rebuild either.
      layer(np.ones((5, 10)))
      self.assertEqual(layer.build_counter, 1)
  def test_dynamic_layer_with_deferred_sequential_model(self):
    """Dynamic-ness propagates from a layer to a deferred Sequential model."""
    model = keras.Sequential(
        [DynamicLayer(dynamic=True),
         keras.layers.Dense(3)])
    self.assertEqual(model.dynamic, True)
    model.compile(rmsprop.RMSprop(0.001), loss='mse')
    # One dynamic layer is enough to force eager execution for the model.
    self.assertEqual(model.run_eagerly, True)
    model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
  def test_nested_dynamic_layers_in_eager_mode(self):
    """A model that nests a dynamic inner model is itself dynamic."""
    inputs = keras.Input((3,))
    outputs = DynamicLayer(dynamic=True)(inputs)
    inner_model = keras.Model(inputs, outputs)
    self.assertEqual(inner_model.dynamic, True)
    # Wrap the dynamic inner model in an outer functional model.
    inputs = keras.Input((3,))
    x = DynamicLayer(dynamic=True)(inputs)
    outputs = inner_model(x)
    model = keras.Model(inputs, outputs)
    self.assertEqual(model.dynamic, True)
    model.compile(rmsprop.RMSprop(0.001), loss='mse')
    self.assertEqual(model.run_eagerly, True)
    model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
  def test_dynamic_subclassed_model_no_shape_inference(self):
    """Without `compute_output_shape`, a dynamic model has unknown outputs."""
    class MyModel(keras.Model):
      def __init__(self):
        super(MyModel, self).__init__(dynamic=True)
        self.layer1 = keras.layers.Dense(3)
        self.layer2 = keras.layers.Dense(3)
      def call(self, inputs):
        # Data-dependent Python control flow: only legal in a dynamic model.
        if math_ops.reduce_sum(inputs) > 0:
          return self.layer1(inputs)
        else:
          return self.layer2(inputs)
    model = MyModel()
    self.assertEqual(model.dynamic, True)
    model.compile(rmsprop.RMSprop(0.001), loss='mse')
    self.assertEqual(model.run_eagerly, True)
    model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
    # No static shape inference was possible, so outputs stay unknown.
    self.assertEqual(model.outputs, [None])
  def test_dynamic_subclassed_model_with_shape_inference(self):
    """`compute_output_shape` gives a dynamic model static output shapes."""
    class MyModel(keras.Model):
      def __init__(self):
        super(MyModel, self).__init__(dynamic=True)
        self.layer1 = keras.layers.Dense(3)
        self.layer2 = keras.layers.Dense(3)
      def call(self, inputs):
        # Data-dependent Python control flow: only legal in a dynamic model.
        if math_ops.reduce_sum(inputs) > 0:
          return self.layer1(inputs)
        else:
          return self.layer2(inputs)
      def compute_output_shape(self, input_shape):
        # Both branches end in a Dense(3), so the last dim is always 3.
        return tensor_shape.TensorShape(
            tuple(input_shape[:-1].as_list()) + (3,))
    model = MyModel()
    self.assertEqual(model.dynamic, True)
    model.compile(rmsprop.RMSprop(0.001), loss='mse')
    model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
    self.assertEqual(model.outputs[0].shape.as_list(), [None, 3])
  @keras_parameterized.run_all_keras_modes
  def test_add_loss_correctness(self):
    """Losses added via `add_loss` in `call` contribute to the training loss."""
    class MyLayer(keras.layers.Layer):
      def call(self, inputs, training=None):
        self.add_loss(math_ops.reduce_sum(inputs))
        return inputs
    inputs = keras.Input((3,))
    layer = MyLayer()
    outputs = layer(inputs)
    model = keras.Model(inputs, outputs)
    self.assertEqual(len(model.losses), 1)
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    # Labels equal predictions, so MSE is 0 and the whole loss is the added
    # reduce_sum over a (2, 3) batch of ones: 2 * 3.
    self.assertEqual(loss, 2 * 3)
  @test_util.run_in_graph_and_eager_modes
  def test_invalid_forward_pass(self):
    """An exception raised inside a layer's `call` surfaces to the caller."""
    inputs = keras.Input((3,))
    with self.assertRaisesRegexp(ValueError, 'You did something wrong!'):
      _ = InvalidLayer()(inputs)
  def test_no_legacy_model(self):
    """Functional models reject legacy tf.layers layers with a clear error."""
    inputs = keras.Input((1,))
    legacy_dense_0 = legacy_core.Dense(1, name='legacy_dense_0')
    legacy_dense_1 = legacy_core.Dense(1, name='legacy_dense_1')
    layer = legacy_dense_0(inputs)
    layer = keras.layers.Dense(1)(layer)
    layer = legacy_dense_1(layer)
    # The error message must list every offending legacy layer.
    expected_regex = (r'The following are legacy tf\.layers\.Layers:\n '
                      '{}\n {}'.format(legacy_dense_0, legacy_dense_1))
    with self.assertRaisesRegexp(TypeError, expected_regex):
      _ = keras.models.Model(inputs=[inputs], outputs=[layer])
    # Inserting legacy layers into an existing model must fail the same way.
    model = keras.models.Model(inputs=[inputs], outputs=[inputs])
    with self.assertRaisesRegexp(TypeError, expected_regex):
      model._insert_layers([legacy_dense_0, legacy_dense_1])
  def test_no_legacy_sequential(self):
    """Sequential models reject legacy tf.layers layers on every entry path."""
    layers = [
        keras.layers.Dense(1),
        legacy_core.Dense(1, name='legacy_dense_0')
    ]
    expected_regex = r'legacy tf\.layers\.Layers:\n {}'.format(layers[1])
    # Rejected when passed at construction time...
    with self.assertRaisesRegexp(TypeError, expected_regex):
      _ = keras.models.Sequential(layers)
    with self.assertRaisesRegexp(TypeError, expected_regex):
      _ = keras.models.Sequential([keras.layers.Input(shape=(4,))] + layers)
    # ...and when added one-by-one via `add`.
    model = keras.models.Sequential()
    with self.assertRaisesRegexp(TypeError, expected_regex):
      for l in layers:
        model.add(l)
  @keras_parameterized.run_with_all_model_types
  @test_util.run_in_graph_and_eager_modes
  def test_build_with_numpy_data(self):
    """Calling a model directly on numpy data marks it as built."""
    model_layers = [
        keras.layers.Dense(3, activation='relu', kernel_initializer='ones'),
        keras.layers.Dense(1, activation='sigmoid', kernel_initializer='ones')
    ]
    model = testing_utils.get_model_from_layers(model_layers, input_shape=(4,))
    model(np.zeros((2, 4), dtype='float32'))
    self.assertTrue(model.built)
  @test_util.run_in_graph_and_eager_modes
  def test_default_add_weight(self):
    """`add_weight` defaults: scalar shape, float32 dtype, auto-naming."""
    class TestLayer(keras.layers.Layer):
      def __init__(self):
        super(TestLayer, self).__init__()
        # No arguments at all: exercises every default.
        self.default_weight = self.add_weight()
        # Shape given but no name.
        self.weight_without_name = self.add_weight(shape=(3, 4))
        # Regularized weight contributes a loss to the layer.
        self.regularized_weight_without_name = self.add_weight(
            shape=(3, 4), regularizer='l2')
    layer = TestLayer()
    self.assertEqual(layer.default_weight.shape.as_list(), [])
    self.assertEqual(layer.weight_without_name.shape.as_list(), [3, 4])
    self.assertEqual(layer.default_weight.dtype.name, 'float32')
    self.assertEqual(layer.weight_without_name.dtype.name, 'float32')
    self.assertEqual(len(layer.losses), 1)
    if not context.executing_eagerly():
      # Cannot access tensor.name in eager execution.
      self.assertTrue('Variable_2/Regularizer' in layer.losses[0].name)
  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_learning_phase_freezing_for_layers(self):
    """Direct model calls observe the global learning phase (scope or set)."""
    class LearningPhaseLayer(keras.layers.Layer):
      def call(self, inputs):
        # Outputs ones in training mode, zeros in inference mode.
        return keras.backend.in_train_phase(
            lambda: array_ops.ones_like(inputs),
            lambda: array_ops.zeros_like(inputs))
    def get_learning_phase_value():
      model = keras.models.Sequential([LearningPhaseLayer(input_shape=(1,))])
      model._run_eagerly = testing_utils.should_run_eagerly()
      model._experimental_run_tf_function = (
          testing_utils.should_run_tf_function())
      return np.sum(model(np.ones((1, 1))))
    self.assertEqual(get_learning_phase_value(), 0)
    # Test scope.
    with keras.backend.learning_phase_scope(1):
      self.assertEqual(get_learning_phase_value(), 1)
    # The effects of the scope end after exiting it.
    self.assertEqual(get_learning_phase_value(), 0)
    # Test setting.
    keras.backend.set_learning_phase(1)
    self.assertEqual(get_learning_phase_value(), 1)
    keras.backend.set_learning_phase(0)
    self.assertEqual(get_learning_phase_value(), 0)
  @keras_parameterized.run_all_keras_modes
  def test_learning_phase_freezing_for_layers_in_predict(self):
    """`predict` always forces inference mode regardless of global phase."""
    if not (testing_utils.should_run_eagerly() or
            testing_utils.should_run_tf_function()):
      self.skipTest('Predict fails to override the outer learning phase in'
                    'the FuncGraph path.')
    class LearningPhaseLayer(keras.layers.Layer):
      def call(self, inputs):
        # Ones in training mode, zeros in inference mode.
        return keras.backend.in_train_phase(
            lambda: array_ops.ones_like(inputs),
            lambda: array_ops.zeros_like(inputs))
    def get_learning_phase_value():
      model = keras.models.Sequential([LearningPhaseLayer(input_shape=(1,))])
      model._run_eagerly = testing_utils.should_run_eagerly()
      model._experimental_run_tf_function = (
          testing_utils.should_run_tf_function())
      return np.sum(model.predict(np.ones((1, 1))))
    self.assertEqual(get_learning_phase_value(), 0)
    # Test scope.
    with keras.backend.learning_phase_scope(1):
      # Unlike a direct call, predict overrides the scope: still inference.
      self.assertEqual(get_learning_phase_value(), 0)
    # The effects of the scope end after exiting it.
    self.assertEqual(get_learning_phase_value(), 0)
    # Test setting.
    keras.backend.set_learning_phase(1)
    self.assertEqual(get_learning_phase_value(), 0)
    keras.backend.set_learning_phase(0)
    self.assertEqual(get_learning_phase_value(), 0)
  # Cannot be enabled with `run_eagerly=True`, see b/123904578
  @test_util.run_all_in_graph_and_eager_modes
  def test_layer_can_return_variable(self):
    """A layer may return a tf.Variable from `call` and still be predictable."""
    class ComputeSum(keras.layers.Layer):
      def __init__(self):
        super(ComputeSum, self).__init__()
        # Running-sum accumulator; non-trainable on purpose.
        self.total = variables.Variable(
            initial_value=array_ops.zeros((1, 1)), trainable=False)
        if not context.executing_eagerly():
          # In graph mode the variable must be initialized explicitly.
          keras.backend.get_session().run(self.total.initializer)
      def call(self, inputs):
        self.total.assign_add(inputs)
        return self.total
    inputs = keras.Input(shape=(1,))
    model = keras.Model(inputs, ComputeSum()(inputs))
    model.predict(np.ones((1, 1)))
  def _get_layer_with_training_arg(self):
    """Build a layer whose tf.function-wrapped `call` honors `training`."""
    class TrainingLayer(keras.layers.Layer):
      @def_function.function
      def call(self, inputs, training=None):
        if training is None:
          # Fall back to the global learning phase when not given.
          training = keras.backend.learning_phase()
        # Ones when training, zeros when not.
        return tf_utils.smart_cond(training,
                                   lambda: array_ops.ones_like(inputs),
                                   lambda: array_ops.zeros_like(inputs))
    return TrainingLayer()
  @keras_parameterized.run_with_all_model_types
  # b/124459427: can't test with `run_eagerly=True` for now.
  @test_util.run_in_graph_and_eager_modes
  def test_training_arg_in_defun(self):
    """fit/evaluate set `training`; an explicit training=False overrides it."""
    layer = self._get_layer_with_training_arg()
    model = testing_utils.get_model_from_layers([layer], input_shape=(1,))
    model.compile(rmsprop.RMSprop(0.),
                  loss='mae')
    # fit runs with training=True -> layer outputs ones -> MAE vs zeros is 1.
    history = model.fit(np.zeros((1, 1)), np.zeros((1, 1)))
    self.assertEqual(history.history['loss'][0], 1.)
    # evaluate runs with training=False -> layer outputs zeros -> MAE is 0.
    loss = model.evaluate(np.zeros((1, 1)), np.zeros((1, 1)))
    self.assertEqual(loss, 0.)
    # Test that the argument can be passed manually to override the phase.
    layer = self._get_layer_with_training_arg()
    inputs = keras.Input(shape=(1,))
    outputs = layer(inputs, training=False)
    model = keras.Model(inputs, outputs)
    model.compile(rmsprop.RMSprop(0.),
                  loss='mae')
    history = model.fit(np.zeros((1, 1)), np.zeros((1, 1)))
    self.assertEqual(history.history['loss'][0], 0.)
  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_raw_variable_assignment(self):
    """Variables nested in plain Python containers are usable in training."""
    class RawVariableLayer(keras.layers.Layer):
      def __init__(self, **kwargs):
        super(RawVariableLayer, self).__init__(**kwargs)
        # Test variables in nested structure (list containing a dict).
        self.var_list = [variables.Variable(1.), {'a': variables.Variable(2.)}]
      def call(self, inputs):
        return inputs * self.var_list[0] * self.var_list[1]['a']
    model = testing_utils.get_model_from_layers([RawVariableLayer()],
                                                input_shape=(10,))
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    x, y = np.ones((10, 10)), np.ones((10, 10))
    # Checks that variables get initialized and training proceeds.
    model.fit(x, y, batch_size=2, epochs=2)
  @test_util.run_in_graph_and_eager_modes
  def test_layer_names(self):
    """Auto-generated names: raw TF ops get `tf_op_layer_` prefixed names."""
    inputs = keras.layers.Input(shape=[2])
    add1 = inputs + inputs
    add2 = keras.layers.Add()([inputs, inputs])
    add3 = inputs + inputs
    add4 = keras.layers.Add()([inputs, inputs])
    model = keras.models.Model(
        inputs=[inputs], outputs=[add1, add2, add3, add4])
    # `+` on symbolic tensors wraps the raw op in a TensorFlowOpLayer, while
    # keras.layers.Add gets the usual 'add'/'add_1' names.
    self.assertEqual(
        [l.name for l in model.layers],
        ['input_1', 'tf_op_layer_add', 'add', 'tf_op_layer_add_2', 'add_1'])
  def test_add_trainable_weight_on_frozen_layer(self):
    """Weights created while a layer is frozen become trainable on unfreeze."""
    class TestLayer(keras.layers.Layer):
      def build(self, input_shape):
        self.w = self.add_weight(shape=(), trainable=True)
      def call(self, inputs):
        return self.w * inputs
    layer = TestLayer()
    layer.trainable = False
    # Build (and hence add_weight) happens while the layer is frozen.
    layer.build(None)
    layer.trainable = True
    self.assertListEqual(layer.trainable_weights, [layer.w])
  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_passing_initial_weights_values(self):
    """The `weights` constructor argument seeds the layer's initial weights."""
    kernel_value = np.random.random((10, 2))
    layer_with_weights = keras.layers.Dense(
        2, use_bias=False, weights=[kernel_value])
    model = testing_utils.get_model_from_layers([layer_with_weights],
                                                input_shape=(10,))
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    inputs = np.random.random((3, 10))
    out = model.predict(inputs)
    self.assertAllClose(model.layers[-1].get_weights()[0], kernel_value)
    # With no bias and no activation, the output is a pure matmul.
    self.assertAllClose(out, np.dot(inputs, kernel_value))
  @test_util.run_in_graph_and_eager_modes
  def test_set_weights_and_get_weights(self):
    """set_weights/get_weights round-trip; mismatches raise ValueError."""
    layer = keras.layers.Dense(2)
    layer.build((None, 10))
    kernel = np.random.random((10, 2))
    bias = np.random.random((2,))
    layer.set_weights([kernel, bias])
    weights = layer.get_weights()
    self.assertEqual(len(weights), 2)
    self.assertAllClose(weights[0], kernel)
    self.assertAllClose(weights[1], bias)
    # Wrong number of weights.
    with self.assertRaisesRegexp(
        ValueError, 'but the layer was expecting 2 weights'):
      layer.set_weights([1, 2, 3])
    # Right count but wrong shape (transposed kernel).
    with self.assertRaisesRegexp(
        ValueError, 'not compatible with provided weight shape'):
      layer.set_weights([kernel.T, bias])
  def test_get_config_error(self):
    """Layers with extra constructor args must override `get_config`."""
    class MyLayer(keras.layers.Layer):
      def __init__(self, my_kwarg='default', **kwargs):
        super(MyLayer, self).__init__(**kwargs)
        self.my_kwarg = my_kwarg
    # `__init__` includes kwargs but `get_config` is not overridden, so
    # getting the config should throw.
    with self.assertRaises(NotImplementedError):
      MyLayer('custom').get_config()
    class MyLayerNew(keras.layers.Layer):
      def __init__(self, my_kwarg='default', **kwargs):
        super(MyLayerNew, self).__init__(**kwargs)
        self.my_kwarg = my_kwarg
      def get_config(self):
        config = super(MyLayerNew, self).get_config()
        config['my_kwarg'] = self.my_kwarg
        return config
    # Overriding `get_config` makes serialization work again.
    self.assertEqual(MyLayerNew('custom').get_config()['my_kwarg'], 'custom')
    class MyLayerNew2(keras.layers.Layer):
      def __init__(self, name='MyLayerName', dtype=None, **kwargs):
        super(MyLayerNew2, self).__init__(name=name, dtype=dtype, **kwargs)
    # Only base-Layer arguments (name/dtype) are customized: no override
    # needed, and the base config must carry them.
    self.assertEqual(MyLayerNew2(name='New').get_config()['name'], 'New')
class SymbolicSupportTest(test.TestCase):
  """Tests mixing Keras symbolic tensors with raw TF ops and eager tensors."""
  def test_using_symbolic_tensors_with_tf_ops(self):
    # Single input through a raw TF op lands in the Keras graph.
    x = keras.Input((3,))
    y = math_ops.square(x)
    self.assertEqual(y.graph, keras.backend.get_graph())
    # Multiple symbolic inputs combined with a raw op.
    x1, x2 = keras.Input((3,)), keras.Input((3,))
    y = array_ops.concat([x1, x2], axis=1)
    self.assertEqual(y.graph, keras.backend.get_graph())
    # Creating the inputs while the Keras graph is the default graph works.
    with keras.backend.get_graph().as_default():
      x1 = keras.Input((3,))
      x2 = keras.Input((3,))
      y = math_ops.matmul(x1, x2)
      self.assertEqual(y.graph, keras.backend.get_graph())
    # Creating the same op type (matmul) multiple times still works.
    x1 = keras.Input((3,))
    x2 = keras.Input((3,))
    y = math_ops.matmul(x1, x2)
    self.assertEqual(y.graph, keras.backend.get_graph())
  def test_mixing_eager_and_graph_tensors(self):
    # x1 belongs to an explicit graph; x2 is an eager tensor.
    with ops.Graph().as_default():
      x1 = array_ops.ones((3, 3))
    x2 = array_ops.ones((3, 3))
    self.assertIsInstance(x2, ops.EagerTensor)
    with self.assertRaisesRegexp(TypeError, 'Graph tensors'):
      math_ops.matmul(x1, x2)
  def test_mixing_numpy_arrays_and_graph_tensors(self):
    # A graph tensor cannot be combined with eager-converted numpy data.
    with ops.Graph().as_default():
      x1 = array_ops.ones((3, 3))
    x2 = np.ones((3, 3), dtype='float32')
    with self.assertRaisesRegexp(TypeError, 'Graph tensors'):
      math_ops.matmul(x1, x2)
  @test_util.run_in_graph_and_eager_modes
  def test_mixing_keras_symbolic_tensors_and_eager_tensors(self):
    """Eager tensors combined with symbolic ones are captured as constants."""
    x1 = keras.Input((3,))
    x2 = array_ops.ones((3, 3))
    y = math_ops.matmul(x1, x2)
    self.assertEqual(y.graph, keras.backend.get_graph())
    fn = keras.backend.function(inputs=[x1], outputs=[y])
    x_val = np.random.random((3, 3))
    y_val = np.ones((3, 3))
    self.assertAllClose(fn([x_val])[0],
                        np.matmul(x_val, y_val),
                        atol=1e-5)
  @test_util.run_in_graph_and_eager_modes
  def test_mixing_keras_symbolic_tensors_and_numpy_arrays(self):
    """Numpy arrays combined with symbolic tensors behave like constants."""
    x1 = keras.Input((3,))
    x2 = np.ones((3, 3), dtype='float32')
    y = math_ops.matmul(x1, x2)
    self.assertEqual(y.graph, keras.backend.get_graph())
    fn = keras.backend.function(inputs=[x1], outputs=[y])
    x_val = np.random.random((3, 3))
    y_val = np.ones((3, 3))
    self.assertAllClose(fn([x_val])[0],
                        np.matmul(x_val, y_val),
                        atol=1e-5)
  @test_util.run_in_graph_and_eager_modes
  def test_reraising_exception(self):
    # When layer is not dynamic, we have some pattern matching during exception
    # handling to detect when the user is trying to use python control flow.
    # When an exception is thrown but the pattern doesn't match, we want to
    # preserve the originating stack trace. An early implementation of this
    # logic lost the stack trace. We test the correct behavior here.
    # NOTE(review): if no TypeError is raised at all this test passes
    # vacuously (no self.fail() after the try) -- confirm intended.
    class TypeErrorLayer(base_layer.Layer):
      def call(self, inputs):
        def easily_identifiable_name():
          raise TypeError('Non-matching TypeError message.')
        easily_identifiable_name()
    inputs = keras.Input((3,))
    try:
      _ = TypeErrorLayer()(inputs)
    except TypeError as e:
      if hasattr(e, 'ag_error_metadata'):
        self.assertIn('easily_identifiable_name', str(e))
        # See ErrorMetadataBase in autograph/pyct/errors.py
        # Topmost frame corresponds to `call` itself.
        function_name = e.ag_error_metadata.translated_stack[-2].function_name
      else:
        tb = traceback.extract_tb(sys.exc_info()[2])
        last_entry = tb[-1]
        function_name = last_entry[2]
      self.assertEqual(function_name, 'easily_identifiable_name')
  @test_util.run_in_graph_and_eager_modes
  def test_summaries_in_tf_function(self):
    """Summaries written inside a layer's `call` reach the event file."""
    if not context.executing_eagerly():
      return
    class MyLayer(keras.layers.Layer):
      def call(self, inputs):
        summary_ops_v2.scalar('mean', math_ops.reduce_mean(inputs))
        return inputs
    tmp_dir = self.get_temp_dir()
    writer = summary_ops_v2.create_file_writer_v2(tmp_dir)
    with writer.as_default(), summary_ops_v2.always_record_summaries():
      my_layer = MyLayer()
      x = array_ops.ones((10, 10))
      def my_fn(x):
        return my_layer(x)
      _ = my_fn(x)
    event_file = gfile.Glob(os.path.join(tmp_dir, 'events*'))
    self.assertLen(event_file, 1)
    event_file = event_file[0]
    tags = set()
    for e in summary_iterator.summary_iterator(event_file):
      for val in e.summary.value:
        tags.add(val.tag)
    # Summary tag is namespaced by the layer name.
    self.assertEqual(set(['my_layer/mean']), tags)
@test_util.run_all_in_graph_and_eager_modes
class NestedTrackingTest(test.TestCase):
  """Tests tracking of variables, updates and losses across nested layers."""
  def test_nested_layer_variable_tracking(self):
    # Test that variables from nested sublayers are
    # being tracked by subclassed layers.
    class MyLayer(keras.layers.Layer):
      def __init__(self):
        super(MyLayer, self).__init__()
        self.dense1 = keras.layers.Dense(1)
        self.dense2 = keras.layers.BatchNormalization()
      def build(self, input_shape):
        self.v1 = self.add_weight('v1', shape=input_shape[1:].as_list())
        # Raw tf.Variable (not add_weight) must also be tracked.
        self.v2 = variables.Variable(
            name='v2',
            initial_value=np.zeros(input_shape[1:].as_list(), dtype='float32'),
            trainable=False)
      def call(self, inputs):
        x = self.dense1(inputs) + self.dense2(inputs)
        return x + self.v1 + self.v2
    layer = MyLayer()
    inputs = keras.Input((1,))
    _ = layer(inputs)
    # 8 weights total: Dense (2) + BatchNorm (4, 2 of them non-trainable)
    # + v1 (trainable) + v2 (non-trainable).
    self.assertEqual(len(layer.weights), 8)
    self.assertEqual(len(layer.trainable_weights), 5)
    self.assertEqual(len(layer.non_trainable_weights), 3)
    # Freezing a sublayer moves its 2 trainable weights to non-trainable.
    layer.dense1.trainable = False
    self.assertEqual(len(layer.weights), 8)
    self.assertEqual(len(layer.trainable_weights), 3)
    self.assertEqual(len(layer.non_trainable_weights), 5)
    # Freezing the outer layer freezes everything.
    layer.trainable = False
    self.assertEqual(len(layer.weights), 8)
    self.assertEqual(len(layer.trainable_weights), 0)
    self.assertEqual(len(layer.non_trainable_weights), 8)
    self.assertEqual(
        set([layer.dense1, layer.dense2, layer.v1, layer.v2]),
        set([obj for unused_name, obj in layer._checkpoint_dependencies]))
  def test_nested_layer_updates_losses_tracking(self):
    # Test that updates and losses from nested sublayers are
    # being tracked by subclassed layers.
    class UpdateAndLossLayer(keras.layers.Layer):
      def build(self, _):
        self.v1 = self.add_weight('v1', shape=())
      def call(self, inputs):
        self.add_loss(math_ops.reduce_sum(inputs))
        self.add_update(state_ops.assign_add(self.v1, 1))
        return inputs + 1
    class MyLayer(keras.layers.Layer):
      def build(self, _):
        self.v1 = self.add_weight('v1', shape=())
      def __init__(self):
        super(MyLayer, self).__init__()
        self.ul1 = UpdateAndLossLayer()
        self.ul2 = UpdateAndLossLayer()
      def call(self, inputs):
        self.add_loss(math_ops.reduce_sum(inputs))
        self.add_update(state_ops.assign_add(self.v1, 1))
        x = self.ul1(inputs)
        return self.ul2(x)
    layer = MyLayer()
    if context.executing_eagerly():
      inputs = array_ops.ones((3, 1))
      _ = layer(inputs)
      # One loss from the outer layer + one from each nested sublayer.
      self.assertEqual(len(layer.losses), 3)
      self.assertLen(layer.get_losses_for(None), 3)
    else:
      inputs = keras.Input((1,))
      _ = layer(inputs)
      self.assertEqual(len(layer.losses), 3)
      # Updates are only tracked in graph mode.
      self.assertEqual(len(layer.updates), 3)
      self.assertLen(layer.get_losses_for(None), 3)
  def test_attribute_reassignment(self):
    """Reassigning/deleting attributes keeps tracking lists consistent."""
    l = keras.layers.Layer()
    l.a = keras.layers.Layer()
    l.a = []
    l.a = variables.Variable(1.)
    l.a = keras.layers.Layer()
    last_assignment = keras.layers.Layer()
    l.a = last_assignment
    l.b = variables.Variable(1.)
    del l.b
    l.c = keras.layers.Layer()
    del l.c
    l.d = last_assignment
    del l.d
    # Only the most recent surviving assignment should remain tracked.
    self.assertEqual([last_assignment], l._layers)
    self.assertEqual([], l.trainable_weights)
    self.assertEqual([], l.non_trainable_weights)
    self.assertEqual([], l.weights)
    del l.a
    self.assertEqual([], l._layers)
  def test_assign_op_not_tracked_as_variable(self):
    """The tensor returned by `assign_add` must not be tracked as a weight."""
    class LayerWithAssignAttr(keras.layers.Layer):
      def build(self, input_shape):
        self.v = variables.Variable(1.)
        self.v_assign = self.v.assign_add(2.)
    layer = LayerWithAssignAttr()
    layer.build((10, 10))
    self.assertEqual([layer.v], layer.variables)
  def test_layer_class_not_tracked_as_sublayer(self):
    # See https://github.com/tensorflow/tensorflow/issues/27431 for details.
    class LayerWithClassAttribute(keras.layers.Layer):
      def __init__(self):
        super(LayerWithClassAttribute, self).__init__()
        # A class object (not an instance) must not be tracked.
        self.layer_fn = keras.layers.Dense
    layer = LayerWithClassAttribute()
    self.assertEmpty(layer.variables)
    self.assertEmpty(layer.submodules)
  def test_layer_call_fn_args(self):
    """`_call_fn_args` reflects the user signature, defunned or not."""
    class NonDefunLayer(keras.layers.Layer):
      def call(self, inputs, a, mask, b=None, training=None):
        return inputs
    class DefunLayer(keras.layers.Layer):
      @def_function.function
      def call(self, x, mask, a, training=None, b=None):
        return x
    nondefun_layer = NonDefunLayer()
    self.assertEqual(nondefun_layer._call_fn_args,
                     ['inputs', 'a', 'mask', 'b', 'training'])
    defun_layer = DefunLayer()
    self.assertEqual(defun_layer._call_fn_args,
                     ['x', 'mask', 'a', 'training', 'b'])
@test_util.run_all_in_graph_and_eager_modes
class NameScopingTest(keras_parameterized.TestCase):
  """Tests that layer names become TF name scopes for weights and sublayers."""
  def test_name_scope_layer(self):
    x = keras.backend.placeholder(shape=(10, 10))
    layer = keras.layers.Dense(10, name='MyName')
    layer(x)
    # Weight names are namespaced by the layer name.
    self.assertEqual(layer.bias.name, 'MyName/bias:0')
    self.assertEqual(layer.kernel.name, 'MyName/kernel:0')
  def test_name_scope_sublayer(self):
    class NameScopeTracker(keras.layers.Layer):
      def call(self, inputs):
        # Record the active name scope at call time for the assertion below.
        self.active_name_scope = ops.get_name_scope()
        return inputs
    x = keras.backend.placeholder(shape=(10, 10))
    sublayer = NameScopeTracker(name='Sublayer')
    layer = keras.layers.Dense(10, activation=sublayer, name='MyName2')
    layer(x)
    self.assertEqual(layer.bias.name, 'MyName2/bias:0')
    self.assertEqual(layer.kernel.name, 'MyName2/kernel:0')
    # A sublayer used as activation runs nested under the outer layer scope.
    self.assertEqual(sublayer.active_name_scope, 'MyName2/Sublayer')
  def test_name_scope_tf_tensor(self):
    x = ops.convert_to_tensor(np.ones((10, 10)))
    layer = keras.layers.Dense(
        10, activation=keras.layers.ReLU(name='MyAct'), name='MyName3')
    layer(x)
    self.assertEqual(layer.bias.name, 'MyName3/bias:0')
    self.assertEqual(layer.kernel.name, 'MyName3/kernel:0')
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class AutographControlFlowTest(keras_parameterized.TestCase):
  """Tests autograph-converted Python control flow inside `Layer.call`."""
  def test_disabling_in_context_is_matched(self):
    """With autograph off, tensor-conditioned `if` raises inside `call`."""
    test_obj = self
    class MyLayer(keras.layers.Layer):
      def call(self, inputs, training=None):
        with test_obj.assertRaisesRegex(TypeError, 'Tensor.*as.*bool'):
          if constant_op.constant(False):
            return inputs * 1.
        return inputs * 0.
    @def_function.function(autograph=False)
    def test_fn():
      return MyLayer()(constant_op.constant([[1., 2., 3.]]))
    test_fn()
  def test_if_training_pattern_output(self):
    """`if training:` branching on outputs works under autograph."""
    class MyLayer(keras.layers.Layer):
      def call(self, inputs, training=None):
        if training:
          return inputs * 1.
        return inputs * 0.
    inputs = keras.Input((3,))
    outputs = MyLayer()(inputs)
    model = keras.Model(inputs, outputs)
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    # Training: outputs == labels -> MSE 0. Inference: outputs are zeros.
    train_loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(train_loss, 0.)
    test_loss = model.test_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(test_loss, 1.)
  def test_if_training_pattern_loss(self):
    """`if training:` branching on `add_loss` works under autograph."""
    class MyLayer(keras.layers.Layer):
      def call(self, inputs, training=None):
        if training:
          loss = math_ops.reduce_sum(inputs)
        else:
          loss = 0.
        self.add_loss(loss)
        return inputs
    inputs = keras.Input((3,))
    outputs = MyLayer()(inputs)
    model = keras.Model(inputs, outputs)
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    # MSE is 0 in both cases; only the conditional add_loss contributes.
    train_loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(train_loss, 2 * 3)
    test_loss = model.test_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(test_loss, 0)
  def test_if_training_pattern_metric(self):
    """`if training:` branching on `add_metric` works under autograph."""
    class MyLayer(keras.layers.Layer):
      def call(self, inputs, training=None):
        if training:
          metric = math_ops.reduce_sum(inputs)
        else:
          metric = 0.
        self.add_metric(metric, name='my_metric', aggregation='mean')
        return inputs
    inputs = keras.Input((3,))
    outputs = MyLayer()(inputs)
    model = keras.Model(inputs, outputs)
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    _, train_metric = model.train_on_batch(np.ones((2, 3)),
                                           np.ones((2, 3)))
    self.assertEqual(train_metric, 2 * 3)
    _, test_metric = model.test_on_batch(np.ones((2, 3)),
                                         np.ones((2, 3)))
    self.assertEqual(test_metric, 0)
  def test_if_training_pattern_update(self):
    """`if training:` selecting a variable update works under autograph."""
    class MyLayer(keras.layers.Layer):
      def build(self, input_shape):
        self.counter = self.add_weight(
            shape=(), trainable=False, initializer='zeros')
      def call(self, inputs, training=None):
        if training:
          increment = 1.
        else:
          increment = 0.
        self.counter.assign_add(increment)
        return inputs
    inputs = keras.Input((3,))
    layer = MyLayer()
    outputs = layer(inputs)
    model = keras.Model(inputs, outputs)
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    # One training batch -> exactly one increment.
    self.assertEqual(keras.backend.get_value(layer.counter), 1.)
  def test_conditional_updates_in_call(self):
    """`add_update` inside a control-flow branch: eager OK, graph raises."""
    class MyLayer(keras.layers.Layer):
      def __init__(self):
        super(MyLayer,
              self).__init__(dynamic=testing_utils.should_run_eagerly())
      def build(self, input_shape):
        self.counter = self.add_weight(
            shape=(), trainable=False, initializer='zeros')
      def call(self, inputs, training=None):
        if training:
          z = math_ops.reduce_sum(inputs)
          self.add_update(lambda: self.counter.assign_add(z))
        return inputs
      def compute_output_shape(self, input_shape):
        return input_shape
    if testing_utils.should_run_eagerly():
      inputs = keras.Input((3,))
      layer = MyLayer()
      outputs = layer(inputs)
      model = keras.Model(inputs, outputs)
      model.compile(
          'sgd',
          'mse',
          run_eagerly=testing_utils.should_run_eagerly(),
          experimental_run_tf_function=testing_utils.should_run_tf_function())
      model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
      # Counter accumulates reduce_sum of a (2, 3) batch of ones: 6.
      self.assertEqual(keras.backend.get_value(layer.counter), 6.)
    else:
      # TODO(fchollet): support the same workflow in graph mode.
      with self.assertRaisesRegexp(RuntimeError,
                                   '`add_update` in a control flow branch'):
        layer = MyLayer()
        layer(keras.Input((3,)))
        _ = layer.updates
  def test_conditional_losses_in_call(self):
    """`add_loss` inside a control-flow branch: eager OK, graph raises."""
    class MyLayer(keras.layers.Layer):
      def __init__(self):
        super(MyLayer,
              self).__init__(dynamic=testing_utils.should_run_eagerly())
      def call(self, inputs, training=None):
        if training:
          self.add_loss(math_ops.reduce_sum(inputs))
        return inputs
      def compute_output_shape(self, input_shape):
        return input_shape
    if testing_utils.should_run_eagerly():
      inputs = keras.Input((3,))
      layer = MyLayer()
      outputs = layer(inputs)
      model = keras.Model(inputs, outputs)
      model.compile(
          'sgd',
          'mse',
          run_eagerly=testing_utils.should_run_eagerly(),
          experimental_run_tf_function=testing_utils.should_run_tf_function())
      loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
      self.assertEqual(loss, 2 * 3)
    else:
      with self.assertRaisesRegexp(RuntimeError,
                                   '`add_loss` in a control flow branch'):
        layer = MyLayer()(keras.Input((3,)))
  def test_conditional_callable_losses(self):
    """Callable (regularizer) losses are usable under tf.function cond."""
    model = keras.Sequential([
        keras.layers.Dense(
            1, kernel_regularizer=keras.regularizers.l2(1e-4), input_shape=(1,))
    ])
    model._run_eagerly = testing_utils.should_run_eagerly()
    model._experimental_run_tf_function = testing_utils.should_run_tf_function()
    def assert_graph(t):
      if not context.executing_eagerly():
        self.assertEqual(t.graph, ops.get_default_graph())
    @def_function.function
    def get_losses(t):
      if t < 0:
        return math_ops.reduce_sum(model.losses) * t
      else:
        return math_ops.reduce_sum(model.losses)
    assert_graph(get_losses(constant_op.constant(2.)))
    assert_graph(get_losses(constant_op.constant(0.5)))
  def test_conditional_metrics_in_call(self):
    """`add_metric` inside a control-flow branch: eager OK, graph raises."""
    class MyLayer(keras.layers.Layer):
      def __init__(self):
        super(MyLayer,
              self).__init__(dynamic=testing_utils.should_run_eagerly())
      def call(self, inputs, training=None):
        if training:
          self.add_metric(math_ops.reduce_sum(inputs),
                          name='sum',
                          aggregation='mean')
        return inputs
      def compute_output_shape(self, input_shape):
        return input_shape
    if testing_utils.should_run_eagerly():
      inputs = keras.Input((3,))
      layer = MyLayer()
      outputs = layer(inputs)
      model = keras.Model(inputs, outputs)
      model.compile(
          'sgd',
          'mse',
          run_eagerly=testing_utils.should_run_eagerly(),
          experimental_run_tf_function=testing_utils.should_run_tf_function())
      history = model.fit(np.ones((2, 3)), np.ones((2, 3)))
      self.assertEqual(history.history['sum'][-1], 2 * 3)
    else:
      # TODO(fchollet): support the same workflow in graph mode.
      with self.assertRaisesRegexp(RuntimeError,
                                   '`add_metric` in a control flow branch'):
        layer = MyLayer()(keras.Input((3,)))
  def test_conditional_activity_regularizer_in_call(self):
    """Activity regularizer behind tensor-conditioned branches: eager OK."""
    class TestModel(keras.Model):
      def __init__(self):
        super(TestModel, self).__init__(
            name='test_model', dynamic=testing_utils.should_run_eagerly())
        self.layer = keras.layers.Dense(2, activity_regularizer='l2')
      def call(self, x, training=None):
        # Both branches are identical; only the control flow matters here.
        if math_ops.greater(math_ops.reduce_sum(x), 0.0):
          return self.layer(x)
        else:
          return self.layer(x)
    model = TestModel()
    model.compile(
        loss='mse',
        optimizer='sgd',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    x = np.ones(shape=(10, 1))
    y = np.ones(shape=(10, 2))
    if testing_utils.should_run_eagerly():
      model.fit(x, y, epochs=2, batch_size=5)
    else:
      with self.assertRaisesRegexp(
          RuntimeError, '`activity_regularizer` in a control flow branch'):
        model.fit(x, y, epochs=2, batch_size=5)
  def test_conditional_activity_regularizer_with_wrappers_in_call(self):
    """Same as above but with the regularized layer inside a wrapper."""
    class TestModel(keras.Model):
      def __init__(self):
        super(TestModel, self).__init__(
            name='test_model', dynamic=testing_utils.should_run_eagerly())
        self.layer = keras.layers.TimeDistributed(
            keras.layers.Dense(2, activity_regularizer='l2'),
            input_shape=(3, 4))
      def call(self, x, training=None):
        # Both branches are identical; only the control flow matters here.
        if math_ops.greater(math_ops.reduce_sum(x), 0.0):
          return self.layer(x)
        else:
          return self.layer(x)
    model = TestModel()
    model.compile(
        loss='mse',
        optimizer='sgd',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    x = np.ones(shape=(10, 3, 4))
    y = np.ones(shape=(10, 3, 2))
    if testing_utils.should_run_eagerly():
      model.fit(x, y, epochs=2, batch_size=5)
    else:
      with self.assertRaisesRegexp(
          RuntimeError, '`activity_regularizer` in a control flow branch'):
        model.fit(x, y, epochs=2, batch_size=5)
class AddLayer(keras.layers.Layer):
  """Test helper layer: adds a single scalar weight `v` (init 1) to inputs."""

  def build(self, _):
    # One scalar weight, initialized to 1.
    self.v = self.add_weight('v', (), initializer='ones')
    self.built = True

  def call(self, inputs):
    return self.v + inputs
class IdentityLayer(keras.layers.Layer):
  """Test helper layer: passes its inputs through unchanged."""

  def call(self, inputs):
    return inputs
@test_util.run_all_in_graph_and_eager_modes
class DTypeTest(keras_parameterized.TestCase):
# This class only have tests relating to layer.dtype. Tests for dtype policies
# are in mixed_precision/experimental/keras_test.py
def _const(self, dtype):
return array_ops.constant(1, dtype=dtype)
  @testing_utils.enable_v2_dtype_behavior
  def test_dtype_defaults_to_floatx(self):
    """Layer dtype defaults to the global floatx and stays fixed after calls."""
    layer = AddLayer()
    self.assertEqual(layer.dtype, 'float32')
    layer(self._const('float64'))
    self.assertEqual(layer.dtype, 'float32')  # dtype should not change
    try:
      backend.set_floatx('float64')
      layer = AddLayer()
      self.assertEqual(layer.dtype, 'float64')
    finally:
      # Restore the global floatx so other tests are unaffected.
      backend.set_floatx('float32')
  @testing_utils.enable_v2_dtype_behavior
  def test_passing_dtype_to_constructor(self):
    """An explicit `dtype` (string or DType) overrides floatx for the layer."""
    layer = IdentityLayer(dtype='float64')
    layer(self._const('float32'))
    self.assertEqual(layer.dtype, 'float64')
    layer = IdentityLayer(dtype='int32')
    layer(self._const('float32'))
    self.assertEqual(layer.dtype, 'int32')
    # A DType object works the same as its string name.
    layer = IdentityLayer(dtype=dtypes.float64)
    layer(self._const('float32'))
    self.assertEqual(layer.dtype, 'float64')
  @testing_utils.enable_v2_dtype_behavior
  def input_cast_to_dtype(self):
    """Floating-point inputs are cast to a floating-point layer's dtype.

    NOTE(review): this method lacks the `test_` prefix, so the test runner
    never discovers it -- confirm whether that is intentional.
    """
    layer = AddLayer()
    # Input should be cast to layer.dtype, so output should also be layer.dtype
    self.assertEqual(layer(self._const('float64')).dtype, 'float32')
    layer = AddLayer(dtype='float64')
    self.assertEqual(layer(self._const('float32')).dtype, 'float64')
    # Test inputs are not casted if layer.dtype is not floating-point
    layer = IdentityLayer(dtype='int32')
    self.assertEqual(layer(self._const('float64')).dtype, 'float64')
    # Test inputs are not casted if the inputs are not floating-point
    layer = IdentityLayer(dtype='float32')
    self.assertEqual(layer(self._const('int32')).dtype, 'int32')
    # Test Numpy arrays are casted
    layer = IdentityLayer(dtype='float64')
    self.assertEqual(layer(np.array(1, dtype='float32')).dtype, 'float64')
    # Test Python floats are casted
    layer = IdentityLayer(dtype='float64')
    self.assertEqual(layer(1.).dtype, 'float64')
@testing_utils.enable_v2_dtype_behavior
def multiple_inputs_cast_to_dtype(self):
class MultiIdentityLayer(keras.layers.Layer):
def call(self, inputs):
return [array_ops.identity(x) for x in inputs]
# Testing layer with default dtype of float32
layer = MultiIdentityLayer()
x, y = layer([self._const('float16'), self._const('float32')])
self.assertEqual(x.dtype, 'float32')
self.assertEqual(y.dtype, 'float32')
# Test passing dtype to the constructor
layer = MultiIdentityLayer(dtype='float64')
x, y = layer([self._const('float16'), self._const('float32')])
self.assertEqual(x.dtype, 'float64')
self.assertEqual(y.dtype, 'float64')
# Test several non-floating point types
layer = MultiIdentityLayer(dtype='float64')
x, y, z, w = layer([self._const('float16'), self._const('bool'),
self._const('float64'), self._constant('complex64')])
self.assertEqual(x.dtype, 'float64')
self.assertEqual(y.dtype, 'bool')
self.assertEqual(z.dtype, 'float64')
self.assertEqual(w.dtype, 'complex64')
@testing_utils.enable_v2_dtype_behavior
def test_extra_args_and_kwargs_not_casted(self):
class IdentityLayerWithArgs(keras.layers.Layer):
def call(self, inputs, *args, **kwargs):
return nest.flatten([inputs, args, kwargs])
layer = IdentityLayerWithArgs(dtype='float64')
x, y, z = layer(self._const('float16'), self._const('float16'),
kwarg=self._const('float16'))
self.assertEqual(x.dtype, 'float64')
self.assertEqual(y.dtype, 'float16')
self.assertEqual(z.dtype, 'float16')
@testing_utils.enable_v2_dtype_behavior
def test_layer_without_autocast(self):
class IdentityLayerWithoutAutocast(IdentityLayer):
def __init__(self, *args, **kwargs):
kwargs['experimental_autocast'] = False
super(IdentityLayerWithoutAutocast, self).__init__(*args, **kwargs)
layer = IdentityLayerWithoutAutocast(dtype='float64')
self.assertEqual(layer(self._const('float32')).dtype, 'float32')
@testing_utils.enable_v2_dtype_behavior
def test_dtype_warnings(self):
# Test a layer warns when it casts inputs.
layer = IdentityLayer()
with test.mock.patch.object(tf_logging, 'warn') as mock_warn:
layer(self._const('float64'))
self.assertRegexpMatches(
str(mock_warn.call_args),
".*from dtype float64 to the layer's dtype of float32.*"
"The layer has dtype float32 because.*")
with test.mock.patch.object(tf_logging, 'warn') as mock_warn:
layer(self._const('float64'))
mock_warn.assert_not_called()
layer = IdentityLayer()
with test.mock.patch.object(tf_logging, 'warn') as mock_warn:
layer(self._const('float64'))
self.assertRegexpMatches(
str(mock_warn.call_args),
".*from dtype float64 to the layer's dtype of float32.*"
"The layer has dtype float32 because.*")
# Test a layer does not warn if a dtype is passed
layer = IdentityLayer(dtype='float32')
with test.mock.patch.object(tf_logging, 'warn') as mock_warn:
layer(self._const('float64'))
mock_warn.assert_not_called()
# Test a layer does not warn if a Policy is set:
with policy.policy_scope('float32'):
layer = IdentityLayer()
with test.mock.patch.object(tf_logging, 'warn') as mock_warn:
layer(self._const('float64'))
mock_warn.assert_not_called()
@testing_utils.enable_v2_dtype_behavior
def test_compute_output_signature(self):
class IdentityLayerWithOutputShape(IdentityLayer):
def compute_output_shape(self, input_shape):
return input_shape
layer = IdentityLayerWithOutputShape(dtype='float64')
output_signature = layer.compute_output_signature(
tensor_spec.TensorSpec(shape=(), dtype='float32'))
self.assertEqual(output_signature.shape, ())
self.assertEqual(output_signature.dtype, 'float64')
@testing_utils.enable_v2_dtype_behavior
def test_passing_non_tensor(self):
layer = IdentityLayer()
x = object()
y = layer(x) # Layer should not cast 'x', as it's not a tensor
self.assertIs(x, y)
@testing_utils.disable_v2_dtype_behavior
def test_v1_behavior(self):
layer = IdentityLayer()
self.assertIsNone(layer.dtype)
layer(self._const('float64'))
self.assertEqual(layer.dtype, 'float64')
self.assertEqual(layer(self._const('float32')).dtype, 'float32')
# (layer_class, input_shape_without_batch, {constructor_kwarg: candidate
# values}) triples; the loop below expands every kwarg combination into a
# named parameterized test case for OutputTypeTest.
_LAYERS_TO_TEST = [
    (keras.layers.Dense, (1,), collections.OrderedDict(units=[1])),
    (keras.layers.Activation, (2, 2),
     collections.OrderedDict(activation=['relu'])),
    (keras.layers.Dropout, (16,), collections.OrderedDict(rate=[0.25])),
    (keras.layers.BatchNormalization, (8, 8, 3), collections.OrderedDict(
        axis=[3], center=[True, False], scale=[True, False])),
    (keras.layers.Conv1D, (8, 8), collections.OrderedDict(
        filters=[1], kernel_size=[1, 3], strides=[1, 2],
        padding=['valid', 'same'], use_bias=[True, False],
        kernel_regularizer=[None, 'l2'])),
    (keras.layers.Conv2D, (8, 8, 3), collections.OrderedDict(
        filters=[1], kernel_size=[1, 3], strides=[1, 2],
        padding=['valid', 'same'], use_bias=[True, False],
        kernel_regularizer=[None, 'l2'])),
    (keras.layers.LSTM, (8, 8), collections.OrderedDict(
        units=[1],
        activation=[None, 'relu'],
        kernel_regularizer=[None, 'l2'],
        dropout=[0, 0.5],
        stateful=[True, False],
        unroll=[True, False])),
]
# Expand _LAYERS_TO_TEST into one named test case per kwarg combination.
OUTPUT_TEST_CASES = []
for layer_cls, input_shape, kwarg_options in _LAYERS_TO_TEST:
  # One [(kwarg, choice), ...] list per kwarg; the cartesian product of
  # these lists enumerates every constructor configuration.
  option_pairs = [[(kwarg, choice) for choice in choices]
                  for kwarg, choices in kwarg_options.items()]
  for selection in it.product(*option_pairs):
    case_name = '_{}_{}'.format(
        layer_cls.__name__,
        '_'.join('{}_{}'.format(k, v) for k, v in selection))
    OUTPUT_TEST_CASES.append(
        (case_name, layer_cls, input_shape, dict(selection)))
class OutputTypeTest(keras_parameterized.TestCase):
  """Checks that layer/model outputs are plain (Eager)Tensors in every mode."""
  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  @parameterized.named_parameters(*OUTPUT_TEST_CASES)
  def test_layer_outputs(self, layer_to_test, input_shape, layer_kwargs):
    # Call the layer directly...
    layer = layer_to_test(**layer_kwargs)
    input_data = np.ones(shape=(2,) + input_shape, dtype=np.float32)
    layer_result = layer(input_data)
    # ...and through a functional model built from a fresh instance.
    inp = keras.layers.Input(shape=input_shape, batch_size=2)
    model = keras.models.Model(inp, layer_to_test(**layer_kwargs)(inp))
    model_result = model(input_data)
    for x in [layer_result, model_result]:
      if not isinstance(x, ops.Tensor):
        raise ValueError('Tensor or EagerTensor expected, got type {}'
                         .format(type(x)))
      # The concrete tensor type must match the current execution mode.
      if isinstance(x, ops.EagerTensor) != context.executing_eagerly():
        expected_type = (ops.EagerTensor if context.executing_eagerly()
                         else ops.Tensor)
        raise ValueError('Expected type {}, got type {}'
                         .format(expected_type, type(x)))
if __name__ == '__main__':
  # Enable eager execution so run_all_keras_modes exercises both paths.
  ops.enable_eager_execution()
  test.main()
| true | true |
f71ec28ea3ab09bb3ecd1216b159b4d1db772a66 | 2,157 | py | Python | Scripts/IsInCidrRanges/IsInCidrRanges.py | TBE-Comp/content | 3a27b1c779dec1e4ee918ca2da77538238dd10f0 | [
"MIT"
] | 7 | 2020-09-24T22:38:01.000Z | 2021-07-14T15:58:35.000Z | Scripts/IsInCidrRanges/IsInCidrRanges.py | TBE-Comp/content | 3a27b1c779dec1e4ee918ca2da77538238dd10f0 | [
"MIT"
] | 7 | 2021-03-25T23:09:39.000Z | 2021-09-23T23:27:14.000Z | Scripts/IsInCidrRanges/IsInCidrRanges.py | TBE-Comp/content | 3a27b1c779dec1e4ee918ca2da77538238dd10f0 | [
"MIT"
] | 2 | 2020-12-08T17:03:33.000Z | 2021-07-13T18:32:06.000Z | import demistomock as demisto
# !!!IMPORTANT!!! If this script should be modified, the updated version must be copied to Scripts/IsRFC1918Address to
# fulfill its unit test. This is a necessary workaround for CI configuration
# borrowed from https://stackoverflow.com/questions/819355/how-can-i-check-if-an-ip-is-in-a-network-in-python
import socket
import struct
import re
def csv_string_to_list(v):
    """Normalize a CSV argument into a list of lowercase tokens.

    Accepts either a comma-separated string (spaces, single quotes and
    newlines are stripped before splitting) or an already-built list, whose
    items are only lowercased.

    :param v: comma-separated string, or list of strings
    :return: list of lowercase tokens
    """
    # isinstance instead of `type(v) == str`: idiomatic, and also treats str
    # subclasses as strings rather than iterating their characters.
    if isinstance(v, str):
        return v.lower().replace(' ', '').replace("'", '').replace('\n', '').split(',')
    # Already a list (e.g. passed programmatically): only normalize the case.
    return [val.lower() for val in v]
def make_mask(n):
    "return a mask of n bits as a long integer"
    # Mask with the *low* n bits set, e.g. make_mask(8) == 0xFF.  This pairs
    # with the little-endian unpack in dotted_quad_to_num, so it only lines
    # up with a CIDR prefix whose length is a multiple of 8 — NOTE(review).
    # Requires n >= 1 (n == 0 raises ValueError on the negative shift).
    return (2 << n - 1) - 1
def dotted_quad_to_num(ip):
    "convert decimal dotted quad string to long integer"
    # inet_aton packs the four octets in network (big-endian) order; '<L'
    # then reads them little-endian, so the first octet lands in the least
    # significant byte (e.g. '1.0.0.0' -> 1).
    packed = socket.inet_aton(ip)
    (value,) = struct.unpack('<L', packed)
    return value
def network_mask(ip, bits):
    "Convert a network address to a long integer"
    # Inlined equivalents of dotted_quad_to_num() and make_mask(): take the
    # little-endian integer form of `ip` and keep only its low `bits` bits.
    address = struct.unpack('<L', socket.inet_aton(ip))[0]
    low_bit_mask = (2 << bits - 1) - 1
    return address & low_bit_mask
def address_in_network(ip, net):
    "Is an address in a network"
    # NOTE(review): this only checks that every bit set in `net` is also set
    # in `ip`, which yields false positives — e.g. 11.0.0.0 "matches" a
    # 10.0.0.0/8 network value.  A correct test is `ip & mask == network`,
    # but the mask is not available in this signature; see main().
    return ip & net == net
def cidr_to_tuple(cidr):
    """Split an IPv4 CIDR string into an (ip, prefix_length) tuple.

    A bare IP (no "/bits" suffix) is treated as a /32.  Returns None when
    the string is not recognized as an IPv4 address or CIDR range.
    """
    # BUG FIX: the octet separators are now escaped (\.) — previously the
    # bare "." matched *any* character, so strings like "1.2x3y4" were
    # accepted as valid addresses.
    res = re.search(r'^(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})/(\d{1,2})$', cidr)
    if not res and re.search(r'^(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})$', cidr):
        # plain ip, return a mask of 32 bits
        return cidr, 32
    elif not res:
        # not an ip we recognise
        return None
    else:
        # valid ip and mask
        ip = res.group(1)
        mask = int(res.group(2))
        return ip, mask
def main(value, cidr_ranges):
    """Return True if any given IPv4 address lies inside any given CIDR range.

    `value` and `cidr_ranges` are comma-separated strings (or lists); a
    range without a "/bits" suffix is treated as a single host (/32).

    Fixes over the previous version:
    - addresses and networks are compared in network byte order with a real
      prefix mask, so non-octet-aligned prefixes (e.g. /20) work and
      "superset" addresses (11.x in 10.0.0.0/8) no longer match;
    - malformed ranges or addresses are skipped instead of raising.
    """

    def _to_list(v):
        # Same normalization as csv_string_to_list(): lowercase and, for
        # strings, strip spaces/quotes/newlines before splitting on commas.
        if isinstance(v, str):
            return v.lower().replace(' ', '').replace("'", '').replace('\n', '').split(',')
        return [val.lower() for val in v]

    def _ip_to_int(ip):
        # Big-endian ("network order") integer so CIDR masks align with the
        # most significant bits.
        return struct.unpack('!L', socket.inet_aton(ip))[0]

    cidr_pattern = re.compile(r'^(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})(?:/(\d{1,2}))?$')
    for addr in _to_list(value):
        try:
            address = _ip_to_int(addr)
        except (OSError, socket.error):
            continue  # unparseable address: skip instead of crashing
        for cidr_range in _to_list(cidr_ranges):
            match = cidr_pattern.search(cidr_range)
            if not match:
                continue  # invalid range: previously this raised TypeError
            bits = int(match.group(2)) if match.group(2) is not None else 32
            if not 0 <= bits <= 32:
                continue  # prefix length out of range
            mask = 0xFFFFFFFF ^ ((1 << (32 - bits)) - 1)
            try:
                network = _ip_to_int(match.group(1)) & mask
            except (OSError, socket.error):
                continue
            if address & mask == network:
                return True
    return False
# Demisto executes scripts with __name__ set to "__builtin__" (Python 2) or
# "builtins" (Python 3); this is the script's entry point in either runtime.
if __name__ == "__builtin__" or __name__ == "builtins":
    demisto.results(main(**demisto.args()))
| 28.012987 | 118 | 0.641632 | import demistomock as demisto
import socket
import struct
import re
def csv_string_to_list(v):
    """Normalize a CSV argument (string or list) to lowercase tokens."""
    if type(v) == str:
        # String input: lowercase, strip junk characters, then split.
        cleaned = v.lower()
        for unwanted in (' ', "'", '\n'):
            cleaned = cleaned.replace(unwanted, '')
        return cleaned.split(',')
    # List input: only lowercase each item.
    return [item.lower() for item in v]
def make_mask(n):
    # Mask with the low n bits set (e.g. n=8 -> 0xFF); requires n >= 1.
    # Pairs with the little-endian unpack in dotted_quad_to_num, so it only
    # matches a CIDR prefix whose length is a multiple of 8 — NOTE(review).
    return (2 << n - 1) - 1
def dotted_quad_to_num(ip):
    # Integer form of the address; '<L' is little-endian, so the first octet
    # ends up in the least significant byte (e.g. '1.0.0.0' -> 1).
    return struct.unpack('<L', socket.inet_aton(ip))[0]
def network_mask(ip, bits):
    # Low-`bits` mask applied to the little-endian integer form of `ip`.
    return dotted_quad_to_num(ip) & make_mask(bits)
def address_in_network(ip, net):
    # NOTE(review): checks only that every bit set in `net` is also set in
    # `ip`, which admits false positives — e.g. 11.0.0.0 "matches" a
    # 10.0.0.0/8 network value.  A correct test needs the mask as well:
    # `ip & mask == network`.
    return ip & net == net
def cidr_to_tuple(cidr):
    """Split an IPv4 CIDR string into an (ip, prefix_length) tuple.

    A bare IP (no "/bits" suffix) is treated as a /32.  Returns None when
    the string is not recognized as an IPv4 address or CIDR range.
    """
    # BUG FIX: the octet separators are now escaped (\.) — previously the
    # bare "." matched *any* character, so strings like "1.2x3y4" were
    # accepted as valid addresses.
    res = re.search(r'^(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})/(\d{1,2})$', cidr)
    if not res and re.search(r'^(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})$', cidr):
        # plain ip, return a mask of 32 bits
        return cidr, 32
    elif not res:
        # not an ip we recognise
        return None
    else:
        # valid ip and mask
        ip = res.group(1)
        mask = int(res.group(2))
        return ip, mask
def main(value, cidr_ranges):
    """Return True if any given IPv4 address lies inside any given CIDR range.

    `value` and `cidr_ranges` are comma-separated strings (or lists); a
    range without a "/bits" suffix is treated as a single host (/32).

    Fixes over the previous version:
    - addresses and networks are compared in network byte order with a real
      prefix mask, so non-octet-aligned prefixes (e.g. /20) work and
      "superset" addresses (11.x in 10.0.0.0/8) no longer match;
    - malformed ranges or addresses are skipped instead of raising.
    """

    def _to_list(v):
        # Same normalization as csv_string_to_list(): lowercase and, for
        # strings, strip spaces/quotes/newlines before splitting on commas.
        if isinstance(v, str):
            return v.lower().replace(' ', '').replace("'", '').replace('\n', '').split(',')
        return [val.lower() for val in v]

    def _ip_to_int(ip):
        # Big-endian ("network order") integer so CIDR masks align with the
        # most significant bits.
        return struct.unpack('!L', socket.inet_aton(ip))[0]

    cidr_pattern = re.compile(r'^(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})(?:/(\d{1,2}))?$')
    for addr in _to_list(value):
        try:
            address = _ip_to_int(addr)
        except (OSError, socket.error):
            continue  # unparseable address: skip instead of crashing
        for cidr_range in _to_list(cidr_ranges):
            match = cidr_pattern.search(cidr_range)
            if not match:
                continue  # invalid range: previously this raised TypeError
            bits = int(match.group(2)) if match.group(2) is not None else 32
            if not 0 <= bits <= 32:
                continue  # prefix length out of range
            mask = 0xFFFFFFFF ^ ((1 << (32 - bits)) - 1)
            try:
                network = _ip_to_int(match.group(1)) & mask
            except (OSError, socket.error):
                continue
            if address & mask == network:
                return True
    return False
# Demisto executes scripts with __name__ set to "__builtin__" (Python 2) or
# "builtins" (Python 3); this is the script's entry point in either runtime.
if __name__ == "__builtin__" or __name__ == "builtins":
    demisto.results(main(**demisto.args()))
| true | true |
f71ec3d88c94f5f903bd1d4299945bddb83a5f54 | 6,947 | py | Python | tests/providers/apache/kylin/operators/test_kylin_cube.py | troywinter/airflow | ba66ba0d97941c55d9f00f66329a9d3c7ad673e7 | [
"Apache-2.0"
] | 1 | 2019-05-07T06:46:55.000Z | 2019-05-07T06:46:55.000Z | tests/providers/apache/kylin/operators/test_kylin_cube.py | troywinter/airflow | ba66ba0d97941c55d9f00f66329a9d3c7ad673e7 | [
"Apache-2.0"
] | 9 | 2020-07-28T15:07:03.000Z | 2022-03-29T22:27:52.000Z | tests/providers/apache/kylin/operators/test_kylin_cube.py | troywinter/airflow | ba66ba0d97941c55d9f00f66329a9d3c7ad673e7 | [
"Apache-2.0"
] | 2 | 2020-03-08T14:12:55.000Z | 2020-06-10T10:17:32.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from datetime import datetime
from unittest.mock import MagicMock, patch
from airflow.exceptions import AirflowException
from airflow.models import TaskInstance
from airflow.models.dag import DAG
from airflow.providers.apache.kylin.operators.kylin_cube import KylinCubeOperator
from airflow.utils import timezone
DEFAULT_DATE = timezone.datetime(2020, 1, 1)
class TestKylinCubeOperator(unittest.TestCase):
    """Unit tests for KylinCubeOperator: command dispatch, job tracking
    and template rendering."""

    # Shared operator configuration.
    # NOTE(review): strftime("%s") is a non-portable glibc extension (it is
    # undefined on Windows) and is evaluated in the local timezone.
    _config = {
        'kylin_conn_id': 'kylin_default',
        'project': 'learn_kylin',
        'cube': 'kylin_sales_cube',
        'command': 'build',
        'start_time': datetime(2012, 1, 2, 0, 0).strftime("%s") + '000',
        'end_time': datetime(2012, 1, 3, 0, 0).strftime("%s") + '000',
    }
    # Every command the mocked hook should report as supported.
    cube_command = ['fullbuild', 'build', 'merge', 'refresh',
                    'delete', 'build_streaming', 'merge_streaming', 'refresh_streaming',
                    'disable', 'enable', 'purge', 'clone', 'drop']
    # Canned response returned by the mocked KylinHook.cube_run().
    build_response = {"uuid": "c143e0e4-ac5f-434d-acf3-46b0d15e3dc6"}

    def setUp(self):
        """Create a fresh DAG for each test."""
        args = {
            'owner': 'airflow',
            'start_date': DEFAULT_DATE
        }
        self.dag = DAG('test_dag_id', default_args=args)

    @patch('airflow.providers.apache.kylin.operators.kylin_cube.KylinHook')
    def test_execute(self, mock_hook):
        """Operator stores its config and forwards it to KylinHook.cube_run()."""
        operator = KylinCubeOperator(
            task_id='kylin_task',
            dag=self.dag,
            **self._config
        )
        hook = MagicMock()
        # CONSISTENCY FIX: reuse the shared class-level command list instead
        # of duplicating the same 13-item literal inline.
        hook.invoke_command = self.cube_command
        mock_hook.return_value = hook
        mock_hook.cube_run.return_value = {}
        self.assertIsNotNone(operator)
        self.assertEqual(self._config['kylin_conn_id'], operator.kylin_conn_id)
        self.assertEqual(self._config['project'], operator.project)
        self.assertEqual(self._config['cube'], operator.cube)
        self.assertEqual(self._config['command'], operator.command)
        self.assertEqual(self._config['start_time'], operator.start_time)
        self.assertEqual(self._config['end_time'], operator.end_time)
        operator.execute(None)
        mock_hook.assert_called_once_with(
            kylin_conn_id=self._config['kylin_conn_id'],
            project=self._config['project'],
            dsn=None
        )
        # start/end strings must be converted back to datetimes by the operator.
        mock_hook.return_value.cube_run.assert_called_once_with('kylin_sales_cube',
                                                                'build',
                                                                end=datetime(2012, 1, 3, 0, 0),
                                                                name=None,
                                                                offset_end=None,
                                                                offset_start=None,
                                                                start=datetime(2012, 1, 2, 0, 0))

    @patch('airflow.providers.apache.kylin.operators.kylin_cube.KylinHook')
    def test_execute_build(self, mock_hook):
        """Tracked build succeeds once the job status reaches FINISHED."""
        operator = KylinCubeOperator(
            is_track_job=True,
            timeout=5,
            interval=1,
            task_id='kylin_task',
            dag=self.dag,
            **self._config
        )
        hook = MagicMock()
        hook.invoke_command = self.cube_command
        hook.cube_run.return_value = self.build_response
        # The operator polls until the job leaves the RUNNING state.
        hook.get_job_status.side_effect = ["RUNNING", "RUNNING", "FINISHED"]
        mock_hook.return_value = hook
        self.assertEqual(operator.execute(None)['uuid'], "c143e0e4-ac5f-434d-acf3-46b0d15e3dc6")

    @patch('airflow.providers.apache.kylin.operators.kylin_cube.KylinHook')
    def test_execute_build_status_error(self, mock_hook):
        """A job ending in ERROR raises AirflowException."""
        operator = KylinCubeOperator(
            is_track_job=True,
            timeout=5,
            interval=1,
            task_id='kylin_task',
            dag=self.dag,
            **self._config
        )
        hook = MagicMock()
        hook.invoke_command = self.cube_command
        hook.cube_run.return_value = self.build_response
        hook.get_job_status.return_value = "ERROR"
        mock_hook.return_value = hook
        self.assertRaises(AirflowException, operator.execute, None)

    @patch('airflow.providers.apache.kylin.operators.kylin_cube.KylinHook')
    def test_execute_build_time_out_error(self, mock_hook):
        """A job stuck in RUNNING past the timeout raises AirflowException."""
        operator = KylinCubeOperator(
            is_track_job=True,
            timeout=5,
            interval=1,
            task_id='kylin_task',
            dag=self.dag,
            **self._config
        )
        hook = MagicMock()
        hook.invoke_command = self.cube_command
        hook.cube_run.return_value = self.build_response
        hook.get_job_status.return_value = "RUNNING"
        mock_hook.return_value = hook
        self.assertRaises(AirflowException, operator.execute, None)

    def test_render_template(self):
        """Templated fields are rendered from `params` before execution."""
        operator = KylinCubeOperator(
            task_id="kylin_build_1",
            kylin_conn_id='kylin_default',
            project="{{ params.project }}",
            cube="{{ params.cube }}",
            command="{{ params.command }}",
            start_time="{{ params.start_time }}",
            end_time="{{ params.end_time }}",
            is_track_job=True,
            dag=self.dag,
            params={
                'project': 'learn_kylin',
                'cube': 'kylin_sales_cube',
                'command': 'build',
                'start_time': '1483200000000',
                'end_time': '1483286400000',
            },
        )
        ti = TaskInstance(operator, DEFAULT_DATE)
        ti.render_templates()
        self.assertEqual('learn_kylin', getattr(operator, 'project'))
        self.assertEqual('kylin_sales_cube', getattr(operator, 'cube'))
        self.assertEqual('build', getattr(operator, 'command'))
        self.assertEqual('1483200000000', getattr(operator, 'start_time'))
        self.assertEqual('1483286400000', getattr(operator, 'end_time'))
| 40.625731 | 99 | 0.601699 |
import unittest
from datetime import datetime
from unittest.mock import MagicMock, patch
from airflow.exceptions import AirflowException
from airflow.models import TaskInstance
from airflow.models.dag import DAG
from airflow.providers.apache.kylin.operators.kylin_cube import KylinCubeOperator
from airflow.utils import timezone
DEFAULT_DATE = timezone.datetime(2020, 1, 1)
class TestKylinCubeOperator(unittest.TestCase):
    """Unit tests for KylinCubeOperator: command dispatch, job tracking
    and template rendering."""
    # NOTE(review): strftime("%s") below is a non-portable glibc extension
    # (undefined on Windows) and is evaluated in the local timezone.
    _config = {
        'kylin_conn_id': 'kylin_default',
        'project': 'learn_kylin',
        'cube': 'kylin_sales_cube',
        'command': 'build',
        'start_time': datetime(2012, 1, 2, 0, 0).strftime("%s") + '000',
        'end_time': datetime(2012, 1, 3, 0, 0).strftime("%s") + '000',
    }
    # Every command the mocked hook should report as supported.
    cube_command = ['fullbuild', 'build', 'merge', 'refresh',
                    'delete', 'build_streaming', 'merge_streaming', 'refresh_streaming',
                    'disable', 'enable', 'purge', 'clone', 'drop']
    # Canned response returned by the mocked KylinHook.cube_run().
    build_response = {"uuid": "c143e0e4-ac5f-434d-acf3-46b0d15e3dc6"}
    def setUp(self):
        """Create a fresh DAG for each test."""
        args = {
            'owner': 'airflow',
            'start_date': DEFAULT_DATE
        }
        self.dag = DAG('test_dag_id', default_args=args)
    @patch('airflow.providers.apache.kylin.operators.kylin_cube.KylinHook')
    def test_execute(self, mock_hook):
        """Operator stores its config and forwards it to KylinHook.cube_run()."""
        operator = KylinCubeOperator(
            task_id='kylin_task',
            dag=self.dag,
            **self._config
        )
        hook = MagicMock()
        # NOTE(review): duplicates the class-level cube_command literal.
        hook.invoke_command = ['fullbuild', 'build', 'merge', 'refresh',
                               'delete', 'build_streaming', 'merge_streaming', 'refresh_streaming',
                               'disable', 'enable', 'purge', 'clone', 'drop']
        mock_hook.return_value = hook
        mock_hook.cube_run.return_value = {}
        self.assertIsNotNone(operator)
        self.assertEqual(self._config['kylin_conn_id'], operator.kylin_conn_id)
        self.assertEqual(self._config['project'], operator.project)
        self.assertEqual(self._config['cube'], operator.cube)
        self.assertEqual(self._config['command'], operator.command)
        self.assertEqual(self._config['start_time'], operator.start_time)
        self.assertEqual(self._config['end_time'], operator.end_time)
        operator.execute(None)
        mock_hook.assert_called_once_with(
            kylin_conn_id=self._config['kylin_conn_id'],
            project=self._config['project'],
            dsn=None
        )
        # start/end strings must be converted back to datetimes by the operator.
        mock_hook.return_value.cube_run.assert_called_once_with('kylin_sales_cube',
                                                                'build',
                                                                end=datetime(2012, 1, 3, 0, 0),
                                                                name=None,
                                                                offset_end=None,
                                                                offset_start=None,
                                                                start=datetime(2012, 1, 2, 0, 0))
    @patch('airflow.providers.apache.kylin.operators.kylin_cube.KylinHook')
    def test_execute_build(self, mock_hook):
        """Tracked build succeeds once the job status reaches FINISHED."""
        operator = KylinCubeOperator(
            is_track_job=True,
            timeout=5,
            interval=1,
            task_id='kylin_task',
            dag=self.dag,
            **self._config
        )
        hook = MagicMock()
        hook.invoke_command = self.cube_command
        hook.cube_run.return_value = self.build_response
        # The operator polls until the job leaves the RUNNING state.
        hook.get_job_status.side_effect = ["RUNNING", "RUNNING", "FINISHED"]
        mock_hook.return_value = hook
        self.assertEqual(operator.execute(None)['uuid'], "c143e0e4-ac5f-434d-acf3-46b0d15e3dc6")
    @patch('airflow.providers.apache.kylin.operators.kylin_cube.KylinHook')
    def test_execute_build_status_error(self, mock_hook):
        """A job ending in ERROR raises AirflowException."""
        operator = KylinCubeOperator(
            is_track_job=True,
            timeout=5,
            interval=1,
            task_id='kylin_task',
            dag=self.dag,
            **self._config
        )
        hook = MagicMock()
        hook.invoke_command = self.cube_command
        hook.cube_run.return_value = self.build_response
        hook.get_job_status.return_value = "ERROR"
        mock_hook.return_value = hook
        self.assertRaises(AirflowException, operator.execute, None)
    @patch('airflow.providers.apache.kylin.operators.kylin_cube.KylinHook')
    def test_execute_build_time_out_error(self, mock_hook):
        """A job stuck in RUNNING past the timeout raises AirflowException."""
        operator = KylinCubeOperator(
            is_track_job=True,
            timeout=5,
            interval=1,
            task_id='kylin_task',
            dag=self.dag,
            **self._config
        )
        hook = MagicMock()
        hook.invoke_command = self.cube_command
        hook.cube_run.return_value = self.build_response
        hook.get_job_status.return_value = "RUNNING"
        mock_hook.return_value = hook
        self.assertRaises(AirflowException, operator.execute, None)
    def test_render_template(self):
        """Templated fields are rendered from `params` before execution."""
        operator = KylinCubeOperator(
            task_id="kylin_build_1",
            kylin_conn_id='kylin_default',
            project="{{ params.project }}",
            cube="{{ params.cube }}",
            command="{{ params.command }}",
            start_time="{{ params.start_time }}",
            end_time="{{ params.end_time }}",
            is_track_job=True,
            dag=self.dag,
            params={
                'project': 'learn_kylin',
                'cube': 'kylin_sales_cube',
                'command': 'build',
                'start_time': '1483200000000',
                'end_time': '1483286400000',
            },
        )
        ti = TaskInstance(operator, DEFAULT_DATE)
        ti.render_templates()
        self.assertEqual('learn_kylin', getattr(operator, 'project'))
        self.assertEqual('kylin_sales_cube', getattr(operator, 'cube'))
        self.assertEqual('build', getattr(operator, 'command'))
        self.assertEqual('1483200000000', getattr(operator, 'start_time'))
        self.assertEqual('1483286400000', getattr(operator, 'end_time'))
| true | true |
f71ec42a28749ddb6de8a5e3cad145986f2bf2c5 | 5,026 | py | Python | royg_train.py | CRLarry/StyleTransfer | ddea81f8570232743bd7b8dbd569cf77f7cb5a28 | [
"Apache-2.0"
] | null | null | null | royg_train.py | CRLarry/StyleTransfer | ddea81f8570232743bd7b8dbd569cf77f7cb5a28 | [
"Apache-2.0"
] | null | null | null | royg_train.py | CRLarry/StyleTransfer | ddea81f8570232743bd7b8dbd569cf77f7cb5a28 | [
"Apache-2.0"
] | null | null | null | import math, random, time, pygame, sys
from pygame.locals import *
from PIL import Image
import numpy as np
import json
print("i tried")
def random_select(distribution, color, iteration):
    """Sample a new RGB tuple near `color`, clamped to 0..255.

    Each channel is drawn from a normal distribution centred on the matching
    channel of `color`; the spread comes from `distribution` (per-channel
    std-devs learned from the training image) and shrinks as `iteration`
    grows, so later upscaling passes add less noise.
    """
    def _channel(mean, spread):
        # Same sigma formula as before: spread * 60 / (20 * (iteration + 1)).
        sample = int(np.random.normal(mean, spread * 60 / (20 * (iteration + 1))))
        # Clamp into the valid 8-bit channel range (replaces the previous
        # triplicated if-chains).
        return max(0, min(255, sample))

    # Channels are sampled in r, g, b order, preserving the original RNG
    # call sequence.
    return (_channel(color[0], distribution[0]),
            _channel(color[1], distribution[1]),
            _channel(color[2], distribution[2]))
def generate_color(input_key, input_color, iteration):
    # Look up the learned per-bucket spread in the module-level `color_model`
    # (built in __main__) and sample a new color near `input_color`.
    # NOTE(review): depends on the global `color_model` being populated first.
    return (random_select(color_model[input_key], input_color, iteration))
def generate_key(input_color):
    """Quantize an (r, g, b) color into a 3-digit bucket key (111..888)."""
    # Each channel maps to one of 8 buckets (32-wide bins), offset by 1 so
    # the digit is never 0.
    red_bucket = int(input_color[0] / 32 + 1)
    green_bucket = int(input_color[1] / 32 + 1)
    blue_bucket = int(input_color[2] / 32 + 1)
    return red_bucket * 100 + green_bucket * 10 + blue_bucket
window_size = 1024  # pygame window size in pixels (unused unless a window is opened)
num_iterations = 2  # number of 2x upscaling passes to run
valid_input = False  # set True once a training image has been loaded successfully
grid_colors = []  # working 2-D grid of (r, g, b) tuples
# NOTE(review): this script is Python-2-only as written — `im.size[0] / 2`
# and `grid_colors[k/2][j/2]` rely on integer division producing ints; under
# Python 3 these are floats and range()/indexing raise TypeError.
if __name__ == "__main__":
    if (len(sys.argv) == 2):
        # Training phase: learn per-color-bucket std-devs from the image.
        training_image = sys.argv[1]
        im = Image.open(training_image)
        pix = im.load()
        rgb_values = []
        color_model = {}
        # Cache all pixels as a column-major 2-D list of (r, g, b) tuples.
        for x in range(im.size[0]):
            these_rgbs = []
            for y in range(im.size[1]):
                these_rgbs.append(pix[x,y])
            rgb_values.append(these_rgbs)
        # For every 2x2 pixel block, record the mean color's bucket and a
        # running average of the per-channel std-dev within the block.
        for x in range(im.size[0] / 2):
            for y in range(im.size[1] / 2):
                rgb_mean = []
                rgb_mean.append(sum([rgb_values[x*2][y*2][0], rgb_values[x*2][y*2+1][0], rgb_values[x*2+1][y*2][0], rgb_values[x*2+1][y*2+1][0]]) / 4)
                rgb_mean.append(sum([rgb_values[x*2][y*2][1], rgb_values[x*2][y*2+1][1], rgb_values[x*2+1][y*2][1], rgb_values[x*2+1][y*2+1][1]]) / 4)
                rgb_mean.append(sum([rgb_values[x*2][y*2][2], rgb_values[x*2][y*2+1][2], rgb_values[x*2+1][y*2][2], rgb_values[x*2+1][y*2+1][2]]) / 4)
                rgb_std = []
                rgb_std.append(int(np.std([rgb_values[x*2][y*2][0], rgb_values[x*2][y*2+1][0], rgb_values[x*2+1][y*2][0], rgb_values[x*2+1][y*2+1][0]])))
                rgb_std.append(int(np.std([rgb_values[x*2][y*2][1], rgb_values[x*2][y*2+1][1], rgb_values[x*2+1][y*2][1], rgb_values[x*2+1][y*2+1][1]])))
                rgb_std.append(int(np.std([rgb_values[x*2][y*2][2], rgb_values[x*2][y*2+1][2], rgb_values[x*2+1][y*2][2], rgb_values[x*2+1][y*2+1][2]])))
                # Same bucketing scheme as generate_key().
                key = int(rgb_mean[0]/32+1)*100 + int(rgb_mean[1]/32+1)*10 + int(rgb_mean[2]/32+1)
                if (key not in color_model.keys()):
                    # [std_r, std_g, std_b, sample_count]
                    color_model[key] = [rgb_std[0], rgb_std[1], rgb_std[2], 1]
                else:
                    # Incrementally update the running mean of each std-dev.
                    color_model[key] = [(color_model[key][0]*color_model[key][3]+rgb_std[0])/(color_model[key][3]+1), (color_model[key][1]*color_model[key][3]+rgb_std[1])/(color_model[key][3]+1), (color_model[key][2]*color_model[key][3]+rgb_std[2])/(color_model[key][3]+1), color_model[key][3]+1]
        # Backfill unseen buckets with random spreads and floor tiny ones so
        # every bucket produces some variation.
        for x in range(8):
            for y in range(8):
                for z in range(8):
                    key = (x+1)*100 + (y+1)*10 + (z+1)
                    if (key not in color_model.keys()):
                        color_model[key] = [int(random.uniform(8, 15)), int(random.uniform(8, 15)), int(random.uniform(8, 15)), 1]
                    if (color_model[key][0] < 6):
                        color_model[key][0] = int(random.uniform(8, 15))
                    if (color_model[key][1] < 6):
                        color_model[key][1] = int(random.uniform(8, 15))
                    if (color_model[key][2] < 6):
                        color_model[key][2] = int(random.uniform(8, 15))
        valid_input = True
    if(valid_input):
        # Generation phase: seed the grid with the source pixels...
        for i in range(im.size[0]):
            row_colors = []
            for j in range(im.size[1]):
                row_colors.append(pix[i,j])
            grid_colors.append(row_colors)
        # ...then repeatedly upscale 2x, sampling each new pixel from the
        # model around its nearest source pixel.
        for i in range(num_iterations):
            new_grid_colors = []
            grid_colors_list = []
            for j in range(len(grid_colors[0]) * 2):
                row_colors = []
                for k in range(len(grid_colors) * 2):
                    # NOTE(review): generate_color is called twice per pixel,
                    # so the flat list saved below holds *different* samples
                    # than the grid carried to the next iteration.
                    row_colors.append(generate_color(generate_key(grid_colors[k/2][j/2]) ,grid_colors[k/2][j/2], i))
                    grid_colors_list.append(generate_color(generate_key(grid_colors[k/2][j/2]) ,grid_colors[k/2][j/2], i))
                new_grid_colors.append(row_colors)
            grid_colors = new_grid_colors
        # img = Image.fromarray(grid_colors, 'RGB')
        # Only the flat pixel list from the final iteration is written out.
        im2 = Image.new('RGB',(len(grid_colors[0]),len(grid_colors)))
        im2.putdata(grid_colors_list)
        im2.save("up20.jpg")
| 45.279279 | 296 | 0.53263 | import math, random, time, pygame, sys
from pygame.locals import *
from PIL import Image
import numpy as np
import json
print("i tried")
def random_select(distribution, color, iteration):
    # Sample each channel from a normal distribution centred on the matching
    # channel of `color`; spread shrinks as `iteration` grows.  Results are
    # clamped to the valid 0..255 channel range below.
    random_r = int(np.random.normal(color[0], distribution[0] * 60 / (20*(iteration+1))))
    random_g = int(np.random.normal(color[1], distribution[1] * 60 / (20*(iteration+1))))
    random_b = int(np.random.normal(color[2], distribution[2] * 60 / (20*(iteration+1))))
    if (random_r > 255):
        random_r = 255
    if (random_g > 255):
        random_g = 255
    if (random_b > 255):
        random_b = 255
    if (random_r < 0):
        random_r = 0
    if (random_g < 0):
        random_g = 0
    if (random_b < 0):
        random_b = 0
    return (random_r, random_g, random_b)
def generate_color(input_key, input_color, iteration):
    # Sample a new color near `input_color` using the spread stored for its
    # bucket in the module-level `color_model` (populated in __main__).
    return (random_select(color_model[input_key], input_color, iteration))
def generate_key(input_color):
    """Quantize an (r, g, b) color into a 3-digit bucket key (111..888)."""
    # Each channel maps to one of 8 buckets (32-wide bins), offset by 1 so
    # the digit is never 0.
    red_bucket = int(input_color[0] / 32 + 1)
    green_bucket = int(input_color[1] / 32 + 1)
    blue_bucket = int(input_color[2] / 32 + 1)
    return red_bucket * 100 + green_bucket * 10 + blue_bucket
window_size = 1024  # pygame window size in pixels (unused unless a window is opened)
num_iterations = 2  # number of 2x upscaling passes to run
valid_input = False  # set True once a training image has been loaded successfully
grid_colors = []  # working 2-D grid of (r, g, b) tuples
# NOTE(review): Python-2-only as written — `im.size[0] / 2` and
# `grid_colors[k/2][j/2]` rely on integer division; under Python 3 these
# produce floats and range()/indexing raise TypeError.
if __name__ == "__main__":
    if (len(sys.argv) == 2):
        # Training phase: learn per-color-bucket std-devs from the image.
        training_image = sys.argv[1]
        im = Image.open(training_image)
        pix = im.load()
        rgb_values = []
        color_model = {}
        # Cache all pixels as a column-major 2-D list of (r, g, b) tuples.
        for x in range(im.size[0]):
            these_rgbs = []
            for y in range(im.size[1]):
                these_rgbs.append(pix[x,y])
            rgb_values.append(these_rgbs)
        # For every 2x2 block, record the mean color's bucket and a running
        # average of the per-channel std-dev within the block.
        for x in range(im.size[0] / 2):
            for y in range(im.size[1] / 2):
                rgb_mean = []
                rgb_mean.append(sum([rgb_values[x*2][y*2][0], rgb_values[x*2][y*2+1][0], rgb_values[x*2+1][y*2][0], rgb_values[x*2+1][y*2+1][0]]) / 4)
                rgb_mean.append(sum([rgb_values[x*2][y*2][1], rgb_values[x*2][y*2+1][1], rgb_values[x*2+1][y*2][1], rgb_values[x*2+1][y*2+1][1]]) / 4)
                rgb_mean.append(sum([rgb_values[x*2][y*2][2], rgb_values[x*2][y*2+1][2], rgb_values[x*2+1][y*2][2], rgb_values[x*2+1][y*2+1][2]]) / 4)
                rgb_std = []
                rgb_std.append(int(np.std([rgb_values[x*2][y*2][0], rgb_values[x*2][y*2+1][0], rgb_values[x*2+1][y*2][0], rgb_values[x*2+1][y*2+1][0]])))
                rgb_std.append(int(np.std([rgb_values[x*2][y*2][1], rgb_values[x*2][y*2+1][1], rgb_values[x*2+1][y*2][1], rgb_values[x*2+1][y*2+1][1]])))
                rgb_std.append(int(np.std([rgb_values[x*2][y*2][2], rgb_values[x*2][y*2+1][2], rgb_values[x*2+1][y*2][2], rgb_values[x*2+1][y*2+1][2]])))
                # Same bucketing scheme as generate_key().
                key = int(rgb_mean[0]/32+1)*100 + int(rgb_mean[1]/32+1)*10 + int(rgb_mean[2]/32+1)
                if (key not in color_model.keys()):
                    # [std_r, std_g, std_b, sample_count]
                    color_model[key] = [rgb_std[0], rgb_std[1], rgb_std[2], 1]
                else:
                    # Incrementally update the running mean of each std-dev.
                    color_model[key] = [(color_model[key][0]*color_model[key][3]+rgb_std[0])/(color_model[key][3]+1), (color_model[key][1]*color_model[key][3]+rgb_std[1])/(color_model[key][3]+1), (color_model[key][2]*color_model[key][3]+rgb_std[2])/(color_model[key][3]+1), color_model[key][3]+1]
        # Backfill unseen buckets with random spreads and floor tiny ones.
        for x in range(8):
            for y in range(8):
                for z in range(8):
                    key = (x+1)*100 + (y+1)*10 + (z+1)
                    if (key not in color_model.keys()):
                        color_model[key] = [int(random.uniform(8, 15)), int(random.uniform(8, 15)), int(random.uniform(8, 15)), 1]
                    if (color_model[key][0] < 6):
                        color_model[key][0] = int(random.uniform(8, 15))
                    if (color_model[key][1] < 6):
                        color_model[key][1] = int(random.uniform(8, 15))
                    if (color_model[key][2] < 6):
                        color_model[key][2] = int(random.uniform(8, 15))
        valid_input = True
    if(valid_input):
        # Generation phase: seed the grid with the source pixels, then
        # repeatedly upscale 2x, sampling each new pixel from the model.
        for i in range(im.size[0]):
            row_colors = []
            for j in range(im.size[1]):
                row_colors.append(pix[i,j])
            grid_colors.append(row_colors)
        for i in range(num_iterations):
            new_grid_colors = []
            grid_colors_list = []
            for j in range(len(grid_colors[0]) * 2):
                row_colors = []
                for k in range(len(grid_colors) * 2):
                    # NOTE(review): generate_color is called twice per pixel,
                    # so the flat list saved below holds *different* samples
                    # than the grid carried to the next iteration.
                    row_colors.append(generate_color(generate_key(grid_colors[k/2][j/2]) ,grid_colors[k/2][j/2], i))
                    grid_colors_list.append(generate_color(generate_key(grid_colors[k/2][j/2]) ,grid_colors[k/2][j/2], i))
                new_grid_colors.append(row_colors)
            grid_colors = new_grid_colors
        # Only the flat pixel list from the final iteration is written out.
        im2 = Image.new('RGB',(len(grid_colors[0]),len(grid_colors)))
        im2.putdata(grid_colors_list)
        im2.save("up20.jpg")
| true | true |
f71ec4b896f551291624910e004a7a70b12742c5 | 4,570 | py | Python | tests/components/freedompro/test_switch.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | tests/components/freedompro/test_switch.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | tests/components/freedompro/test_switch.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Tests for the Freedompro switch."""
from datetime import timedelta
from unittest.mock import ANY, patch
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN, SERVICE_TURN_ON
from homeassistant.const import ATTR_ENTITY_ID, SERVICE_TURN_OFF, STATE_OFF, STATE_ON
from homeassistant.helpers import entity_registry as er
from homeassistant.helpers.entity_component import async_update_entity
from homeassistant.util.dt import utcnow
from tests.common import async_fire_time_changed
from tests.components.freedompro.conftest import get_states_response_for_uid
uid = "3WRRJR6RCZQZSND8VP0YTO3YXCSOFPKBMW8T51TU-LQ*1JKU1MVWHQL-Z9SCUS85VFXMRGNDCDNDDUVVDKBU31W"
async def test_switch_get_state(hass, init_integration):
    """Test states of the switch."""
    # NOTE(review): bare fixture reference — a no-op statement; the fixture
    # has already set up the integration by being a parameter.
    init_integration
    registry = er.async_get(hass)
    entity_id = "switch.irrigation_switch"
    # Initial state from the fixture: switch is off.
    state = hass.states.get(entity_id)
    assert state
    assert state.state == STATE_OFF
    assert state.attributes.get("friendly_name") == "Irrigation switch"
    entry = registry.async_get(entity_id)
    assert entry
    assert entry.unique_id == uid
    # Flip the device on in the mocked API response...
    states_response = get_states_response_for_uid(uid)
    states_response[0]["state"]["on"] = True
    with patch(
        "homeassistant.components.freedompro.get_states",
        return_value=states_response,
    ):
        # ...and advance time past the coordinator's update interval so the
        # new state is polled.
        async_fire_time_changed(hass, utcnow() + timedelta(hours=2))
        await hass.async_block_till_done()
        state = hass.states.get(entity_id)
        assert state
        assert state.attributes.get("friendly_name") == "Irrigation switch"
        entry = registry.async_get(entity_id)
        assert entry
        assert entry.unique_id == uid
        assert state.state == STATE_ON
async def test_switch_set_off(hass, init_integration):
    """Test turning the switch off via the ``switch.turn_off`` service."""
    # The fixture has already set the integration up.
    init_integration
    registry = er.async_get(hass)
    entity_id = "switch.irrigation_switch"
    # First drive the entity to ON through the mocked cloud API.
    states_response = get_states_response_for_uid(uid)
    states_response[0]["state"]["on"] = True
    with patch(
        "homeassistant.components.freedompro.get_states",
        return_value=states_response,
    ):
        await async_update_entity(hass, entity_id)
        async_fire_time_changed(hass, utcnow() + timedelta(hours=2))
        await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state
    assert state.state == STATE_ON
    assert state.attributes.get("friendly_name") == "Irrigation switch"
    entry = registry.async_get(entity_id)
    assert entry
    assert entry.unique_id == uid
    # Calling the service must send '{"on": false}' to the cloud endpoint.
    with patch(
        "homeassistant.components.freedompro.switch.put_state"
    ) as mock_put_state:
        assert await hass.services.async_call(
            SWITCH_DOMAIN,
            SERVICE_TURN_OFF,
            {ATTR_ENTITY_ID: [entity_id]},
            blocking=True,
        )
    mock_put_state.assert_called_once_with(ANY, ANY, ANY, '{"on": false}')
    # Once the cloud reports off, the entity state follows on refresh.
    states_response = get_states_response_for_uid(uid)
    states_response[0]["state"]["on"] = False
    with patch(
        "homeassistant.components.freedompro.get_states",
        return_value=states_response,
    ):
        async_fire_time_changed(hass, utcnow() + timedelta(hours=2))
        await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == STATE_OFF
async def test_switch_set_on(hass, init_integration):
    """Test turning the switch on via the ``switch.turn_on`` service."""
    # The fixture has already set the integration up.
    init_integration
    registry = er.async_get(hass)
    entity_id = "switch.irrigation_switch"
    # Entity starts off, with the expected friendly name and unique id.
    state = hass.states.get(entity_id)
    assert state
    assert state.state == STATE_OFF
    assert state.attributes.get("friendly_name") == "Irrigation switch"
    entry = registry.async_get(entity_id)
    assert entry
    assert entry.unique_id == uid
    # Calling the service must send '{"on": true}' to the cloud endpoint.
    with patch(
        "homeassistant.components.freedompro.switch.put_state"
    ) as mock_put_state:
        assert await hass.services.async_call(
            SWITCH_DOMAIN,
            SERVICE_TURN_ON,
            {ATTR_ENTITY_ID: [entity_id]},
            blocking=True,
        )
    mock_put_state.assert_called_once_with(ANY, ANY, ANY, '{"on": true}')
    # Once the cloud reports on, the entity state follows on refresh.
    states_response = get_states_response_for_uid(uid)
    states_response[0]["state"]["on"] = True
    with patch(
        "homeassistant.components.freedompro.get_states",
        return_value=states_response,
    ):
        async_fire_time_changed(hass, utcnow() + timedelta(hours=2))
        await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == STATE_ON
| 32.877698 | 95 | 0.70547 | from datetime import timedelta
from unittest.mock import ANY, patch
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN, SERVICE_TURN_ON
from homeassistant.const import ATTR_ENTITY_ID, SERVICE_TURN_OFF, STATE_OFF, STATE_ON
from homeassistant.helpers import entity_registry as er
from homeassistant.helpers.entity_component import async_update_entity
from homeassistant.util.dt import utcnow
from tests.common import async_fire_time_changed
from tests.components.freedompro.conftest import get_states_response_for_uid
uid = "3WRRJR6RCZQZSND8VP0YTO3YXCSOFPKBMW8T51TU-LQ*1JKU1MVWHQL-Z9SCUS85VFXMRGNDCDNDDUVVDKBU31W"
async def test_switch_get_state(hass, init_integration):
init_integration
registry = er.async_get(hass)
entity_id = "switch.irrigation_switch"
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_OFF
assert state.attributes.get("friendly_name") == "Irrigation switch"
entry = registry.async_get(entity_id)
assert entry
assert entry.unique_id == uid
states_response = get_states_response_for_uid(uid)
states_response[0]["state"]["on"] = True
with patch(
"homeassistant.components.freedompro.get_states",
return_value=states_response,
):
async_fire_time_changed(hass, utcnow() + timedelta(hours=2))
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state
assert state.attributes.get("friendly_name") == "Irrigation switch"
entry = registry.async_get(entity_id)
assert entry
assert entry.unique_id == uid
assert state.state == STATE_ON
async def test_switch_set_off(hass, init_integration):
init_integration
registry = er.async_get(hass)
entity_id = "switch.irrigation_switch"
states_response = get_states_response_for_uid(uid)
states_response[0]["state"]["on"] = True
with patch(
"homeassistant.components.freedompro.get_states",
return_value=states_response,
):
await async_update_entity(hass, entity_id)
async_fire_time_changed(hass, utcnow() + timedelta(hours=2))
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_ON
assert state.attributes.get("friendly_name") == "Irrigation switch"
entry = registry.async_get(entity_id)
assert entry
assert entry.unique_id == uid
with patch(
"homeassistant.components.freedompro.switch.put_state"
) as mock_put_state:
assert await hass.services.async_call(
SWITCH_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: [entity_id]},
blocking=True,
)
mock_put_state.assert_called_once_with(ANY, ANY, ANY, '{"on": false}')
states_response = get_states_response_for_uid(uid)
states_response[0]["state"]["on"] = False
with patch(
"homeassistant.components.freedompro.get_states",
return_value=states_response,
):
async_fire_time_changed(hass, utcnow() + timedelta(hours=2))
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == STATE_OFF
async def test_switch_set_on(hass, init_integration):
init_integration
registry = er.async_get(hass)
entity_id = "switch.irrigation_switch"
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_OFF
assert state.attributes.get("friendly_name") == "Irrigation switch"
entry = registry.async_get(entity_id)
assert entry
assert entry.unique_id == uid
with patch(
"homeassistant.components.freedompro.switch.put_state"
) as mock_put_state:
assert await hass.services.async_call(
SWITCH_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: [entity_id]},
blocking=True,
)
mock_put_state.assert_called_once_with(ANY, ANY, ANY, '{"on": true}')
states_response = get_states_response_for_uid(uid)
states_response[0]["state"]["on"] = True
with patch(
"homeassistant.components.freedompro.get_states",
return_value=states_response,
):
async_fire_time_changed(hass, utcnow() + timedelta(hours=2))
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state.state == STATE_ON
| true | true |
f71ec50af27e8ed303e8fc89876d0a14c0305d56 | 658 | py | Python | backend/app/api/api_v1/routers/words.py | davipaula/learn-with-me | 9357ee83004df2a7ddc2b278eab05dcd63119b69 | [
"MIT"
] | null | null | null | backend/app/api/api_v1/routers/words.py | davipaula/learn-with-me | 9357ee83004df2a7ddc2b278eab05dcd63119b69 | [
"MIT"
] | null | null | null | backend/app/api/api_v1/routers/words.py | davipaula/learn-with-me | 9357ee83004df2a7ddc2b278eab05dcd63119b69 | [
"MIT"
] | null | null | null | from fastapi import APIRouter, Depends
from app.db.crud import get_video_captions
from app.db.session import get_db
from app.model import tf_idf
words_router = r = APIRouter()
@r.get("/topics/{topic}/{number_of_words}")
def by_topic(topic: str, number_of_words: int):
most_important_words = tf_idf.run(topic, number_of_words)
return {"topic": topic, "words": most_important_words}
@r.get("/topic/videos/")
def get_videos_with_words(db=Depends(get_db)):
topic = "business"
words = {
"company",
"want",
"business",
"world",
"actually",
}
return {"value": get_video_captions(db, limit=1)}
| 22.689655 | 61 | 0.671733 | from fastapi import APIRouter, Depends
from app.db.crud import get_video_captions
from app.db.session import get_db
from app.model import tf_idf
words_router = r = APIRouter()
@r.get("/topics/{topic}/{number_of_words}")
def by_topic(topic: str, number_of_words: int):
most_important_words = tf_idf.run(topic, number_of_words)
return {"topic": topic, "words": most_important_words}
@r.get("/topic/videos/")
def get_videos_with_words(db=Depends(get_db)):
topic = "business"
words = {
"company",
"want",
"business",
"world",
"actually",
}
return {"value": get_video_captions(db, limit=1)}
| true | true |
f71ec523cbad689ebbde6ad316d921c9776fce3f | 9,001 | py | Python | metrics_utils.py | maojie/depot_tools | ba35c7f8ce4bc9ae93607109b89199d3d9e0af2d | [
"BSD-3-Clause"
] | null | null | null | metrics_utils.py | maojie/depot_tools | ba35c7f8ce4bc9ae93607109b89199d3d9e0af2d | [
"BSD-3-Clause"
] | null | null | null | metrics_utils.py | maojie/depot_tools | ba35c7f8ce4bc9ae93607109b89199d3d9e0af2d | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import re
import scm
import subprocess2
import sys
import urlparse
# Current version of metrics recording.
# When we add new metrics, the version number will be increased, we display the
# user what has changed, and ask the user to agree again.
CURRENT_VERSION = 1
APP_URL = 'https://cit-cli-metrics.appspot.com'
def get_notice_countdown_header(countdown):
  """Yield the headline line for the metrics notice box."""
  if countdown:
    yield ' METRICS COLLECTION WILL START IN %d EXECUTIONS' % countdown
  else:
    yield ' METRICS COLLECTION IS TAKING PLACE'
def get_notice_version_change_header():
  """Yield the headline shown when the metrics version has increased."""
  header = (
      ' WE ARE COLLECTING ADDITIONAL METRICS',
      '',
      ' Please review the changes and opt-in again.',
  )
  for line in header:
    yield line
def get_notice_footer():
  """Yield the boilerplate footer lines for the metrics notice box."""
  footer = (
      'To suppress this message opt in or out using:',
      '$ gclient metrics [--opt-in] [--opt-out]',
      'For more information please see metrics.README.md',
      'in your depot_tools checkout or visit',
      'https://goo.gl/yNpRDV.',
  )
  for line in footer:
    yield line
def get_change_notice(version):
  """Yield the notice lines describing what changed in metrics |version|.

  Versions with no user-visible change (e.g. 0) yield nothing.
  """
  if version == 1:
    yield 'We want to collect the Git version.'
    yield 'We want to collect information about the HTTP'
    yield 'requests that depot_tools makes, and the git and'
    yield 'cipd commands it executes.'
    yield ''
    yield 'We only collect known strings to make sure we'
    yield 'don\'t record PII.'
# Whitelisted repository URLs; only these may appear verbatim in metrics,
# so private project names cannot leak.
KNOWN_PROJECT_URLS = {
  'https://chrome-internal.googlesource.com/chrome/ios_internal',
  'https://chrome-internal.googlesource.com/infra/infra_internal',
  'https://chromium.googlesource.com/breakpad/breakpad',
  'https://chromium.googlesource.com/chromium/src',
  'https://chromium.googlesource.com/chromium/tools/depot_tools',
  'https://chromium.googlesource.com/crashpad/crashpad',
  'https://chromium.googlesource.com/external/gyp',
  'https://chromium.googlesource.com/external/naclports',
  'https://chromium.googlesource.com/infra/goma/client',
  'https://chromium.googlesource.com/infra/infra',
  'https://chromium.googlesource.com/native_client/',
  'https://chromium.googlesource.com/syzygy',
  'https://chromium.googlesource.com/v8/v8',
  'https://dart.googlesource.com/sdk',
  'https://pdfium.googlesource.com/pdfium',
  'https://skia.googlesource.com/buildbot',
  'https://skia.googlesource.com/skia',
  'https://webrtc.googlesource.com/src',
}
# Gerrit hosts whose names may be recorded in HTTP request metrics.
KNOWN_HTTP_HOSTS = {
  'chrome-internal-review.googlesource.com',
  'chromium-review.googlesource.com',
  'dart-review.googlesource.com',
  'eu1-mirror-chromium-review.googlesource.com',
  'pdfium-review.googlesource.com',
  'skia-review.googlesource.com',
  'us1-mirror-chromium-review.googlesource.com',
  'us2-mirror-chromium-review.googlesource.com',
  'us3-mirror-chromium-review.googlesource.com',
  'webrtc-review.googlesource.com',
}
# HTTP verbs that may be recorded verbatim.
KNOWN_HTTP_METHODS = {
  'DELETE',
  'GET',
  'PATCH',
  'POST',
  'PUT',
}
# Logical path name -> regex. A request path is reported under the first
# name whose regex matches it (see extract_http_metrics).
KNOWN_HTTP_PATHS = {
  'accounts':
      re.compile(r'(/a)?/accounts/.*'),
  'changes':
      re.compile(r'(/a)?/changes/([^/]+)?$'),
  'changes/abandon':
      re.compile(r'(/a)?/changes/.*/abandon'),
  'changes/comments':
      re.compile(r'(/a)?/changes/.*/comments'),
  'changes/detail':
      re.compile(r'(/a)?/changes/.*/detail'),
  'changes/edit':
      re.compile(r'(/a)?/changes/.*/edit'),
  'changes/message':
      re.compile(r'(/a)?/changes/.*/message'),
  'changes/restore':
      re.compile(r'(/a)?/changes/.*/restore'),
  'changes/reviewers':
      re.compile(r'(/a)?/changes/.*/reviewers/.*'),
  'changes/revisions/commit':
      re.compile(r'(/a)?/changes/.*/revisions/.*/commit'),
  'changes/revisions/review':
      re.compile(r'(/a)?/changes/.*/revisions/.*/review'),
  'changes/submit':
      re.compile(r'(/a)?/changes/.*/submit'),
  'projects/branches':
      re.compile(r'(/a)?/projects/.*/branches/.*'),
}
# Values of the Gerrit 'o' url parameter that may be recorded.
KNOWN_HTTP_ARGS = {
  'ALL_REVISIONS',
  'CURRENT_COMMIT',
  'CURRENT_REVISION',
  'DETAILED_ACCOUNTS',
  'LABELS',
}
# Parses `git --version` output into (major, minor, micro) groups.
GIT_VERSION_RE = re.compile(
    r'git version (\d)\.(\d{0,2})\.(\d{0,2})'
)
# Subcommand arguments known to be free of PII; anything else is dropped
# by extract_known_subcommand_args.
KNOWN_SUBCOMMAND_ARGS = {
  'cc',
  'hashtag',
  'l=Auto-Submit+1',
  'l=Code-Review+1',
  'l=Code-Review+2',
  'l=Commit-Queue+1',
  'l=Commit-Queue+2',
  'label',
  'm',
  'notify=ALL',
  'notify=NONE',
  'private',
  'r',
  'ready',
  'topic',
  'wip'
}
def get_python_version():
  """Return the python version in the major.minor.micro format."""
  return '.'.join(str(part) for part in sys.version_info[:3])
def get_git_version():
  """Return the Git version in the major.minor.micro format, or None.

  Returns None when `git --version` output does not match GIT_VERSION_RE.
  """
  proc = subprocess2.Popen(
      ['git', '--version'],
      stdout=subprocess2.PIPE, stderr=subprocess2.PIPE)
  out, _ = proc.communicate()
  match = GIT_VERSION_RE.match(out)
  if match:
    return '%s.%s.%s' % match.groups()
  return None
def return_code_from_exception(exception):
  """Return the exit code that raising |exception| would produce.

  |exception| is an exc_info-style (type, value, traceback) triple, or
  None for a clean exit (code 0). SystemExit carries its own code; any
  other exception maps to 1.
  """
  if exception is None:
    return 0
  exc = exception[1]
  return exc.code if isinstance(exc, SystemExit) else 1
def seconds_to_weeks(duration):
  """Approximate |duration| (seconds) in weeks.

  Drops the lowest 19 bits of the integer representation (2**19 seconds
  is roughly 6 days), which deliberately coarsens the timestamp.
  """
  return int(duration) // (1 << 19)
def extract_known_subcommand_args(args):
  """Return the sorted list of known (PII-free) arguments found in |args|.

  An argument of the form 'name=value' is recorded as just 'name' when the
  bare name — but not the full string — is whitelisted.
  """
  known_args = []
  for arg in args:
    candidate = arg if arg in KNOWN_SUBCOMMAND_ARGS else arg.split('=')[0]
    if candidate in KNOWN_SUBCOMMAND_ARGS:
      known_args.append(candidate)
  return sorted(known_args)
def extract_http_metrics(request_uri, method, status, response_time):
  """Summarize an HTTP request as a dict of known, non-PII fields.

  The result always contains |status| and |response_time|. The method,
  host, logical path name and Gerrit 'o' options are added only when they
  appear in the KNOWN_HTTP_* whitelists defined above, so no PII can make
  it into the collected metrics.
  """
  summary = {
      'status': status,
      'response_time': response_time,
  }
  if method in KNOWN_HTTP_METHODS:
    summary['method'] = method

  parsed = urlparse.urlparse(request_uri)
  if parsed.netloc in KNOWN_HTTP_HOSTS:
    summary['host'] = parsed.netloc
  # Report the path under the first whitelisted name whose regex matches.
  for name, path_re in KNOWN_HTTP_PATHS.iteritems():
    if path_re.match(parsed.path):
      summary['path'] = name
      break

  # Record only whitelisted values of the 'o' url parameter. In Gerrit,
  # each 'o' option costs extra database lookups, so they are worth
  # collecting.
  options = urlparse.parse_qs(parsed.query).get('o', [])
  known_options = [option for option in options if option in KNOWN_HTTP_ARGS]
  if known_options:
    summary['arguments'] = known_options

  return summary
def get_repo_timestamp(path_to_repo):
  """Return the approximate age, in weeks, of the upstream of |path_to_repo|.

  Reads the timestamp of the most recent commit on the upstream of the
  currently checked out branch (falling back to HEAD when there is no
  upstream) and coarsens it via seconds_to_weeks(). Returns None when the
  git invocation fails.
  """
  # Resolve the upstream for the current branch; fall back to HEAD.
  try:
    upstream = scm.GIT.GetUpstreamBranch(path_to_repo) or 'HEAD'
  except subprocess2.CalledProcessError:
    upstream = 'HEAD'
  # Ask git for the commit timestamp (%at = author time, epoch seconds).
  proc = subprocess2.Popen(
      ['git', '-C', path_to_repo, 'log', '-n1', upstream, '--format=%at'],
      stdout=subprocess2.PIPE, stderr=subprocess2.PIPE)
  out, _ = proc.communicate()
  if proc.returncode != 0:
    return None
  return seconds_to_weeks(out.strip())
def print_boxed_text(out, min_width, lines):
  """Write |lines| to |out| surrounded by an ASCII box.

  |out| is a callable taking one string (e.g. sys.stderr.write). The box
  interior is at least |min_width| characters wide.
  """
  width = max(min_width, max(len(line) for line in lines))
  border = '+' + '=' * (width + 2) + '+\n'
  out(border)
  for line in lines:
    out('| ' + line.ljust(width) + ' |\n')
  out(border)
def print_notice(countdown):
  """Print the metrics-collection status box (49 cols min) to stderr."""
  message = list(get_notice_countdown_header(countdown))
  message.append('')
  message.extend(get_notice_footer())
  print_boxed_text(sys.stderr.write, 49, message)
def print_version_change(config_version):
  """Print a notice describing metrics added since |config_version|."""
  message = list(get_notice_version_change_header())
  # Describe every version the user has not yet opted into.
  for version in xrange(config_version + 1, CURRENT_VERSION + 1):
    message.append('')
    message.extend(get_change_notice(version))
  print_boxed_text(sys.stderr.write, 49, message)
| 30.003333 | 80 | 0.693479 |
from __future__ import print_function
import re
import scm
import subprocess2
import sys
import urlparse
CURRENT_VERSION = 1
APP_URL = 'https://cit-cli-metrics.appspot.com'
def get_notice_countdown_header(countdown):
if countdown == 0:
yield ' METRICS COLLECTION IS TAKING PLACE'
else:
yield ' METRICS COLLECTION WILL START IN %d EXECUTIONS' % countdown
def get_notice_version_change_header():
yield ' WE ARE COLLECTING ADDITIONAL METRICS'
yield ''
yield ' Please review the changes and opt-in again.'
def get_notice_footer():
yield 'To suppress this message opt in or out using:'
yield '$ gclient metrics [--opt-in] [--opt-out]'
yield 'For more information please see metrics.README.md'
yield 'in your depot_tools checkout or visit'
yield 'https://goo.gl/yNpRDV.'
def get_change_notice(version):
if version == 0:
pass
elif version == 1:
yield 'We want to collect the Git version.'
yield 'We want to collect information about the HTTP'
yield 'requests that depot_tools makes, and the git and'
yield 'cipd commands it executes.'
yield ''
yield 'We only collect known strings to make sure we'
yield 'don\'t record PII.'
KNOWN_PROJECT_URLS = {
'https://chrome-internal.googlesource.com/chrome/ios_internal',
'https://chrome-internal.googlesource.com/infra/infra_internal',
'https://chromium.googlesource.com/breakpad/breakpad',
'https://chromium.googlesource.com/chromium/src',
'https://chromium.googlesource.com/chromium/tools/depot_tools',
'https://chromium.googlesource.com/crashpad/crashpad',
'https://chromium.googlesource.com/external/gyp',
'https://chromium.googlesource.com/external/naclports',
'https://chromium.googlesource.com/infra/goma/client',
'https://chromium.googlesource.com/infra/infra',
'https://chromium.googlesource.com/native_client/',
'https://chromium.googlesource.com/syzygy',
'https://chromium.googlesource.com/v8/v8',
'https://dart.googlesource.com/sdk',
'https://pdfium.googlesource.com/pdfium',
'https://skia.googlesource.com/buildbot',
'https://skia.googlesource.com/skia',
'https://webrtc.googlesource.com/src',
}
KNOWN_HTTP_HOSTS = {
'chrome-internal-review.googlesource.com',
'chromium-review.googlesource.com',
'dart-review.googlesource.com',
'eu1-mirror-chromium-review.googlesource.com',
'pdfium-review.googlesource.com',
'skia-review.googlesource.com',
'us1-mirror-chromium-review.googlesource.com',
'us2-mirror-chromium-review.googlesource.com',
'us3-mirror-chromium-review.googlesource.com',
'webrtc-review.googlesource.com',
}
KNOWN_HTTP_METHODS = {
'DELETE',
'GET',
'PATCH',
'POST',
'PUT',
}
KNOWN_HTTP_PATHS = {
'accounts':
re.compile(r'(/a)?/accounts/.*'),
'changes':
re.compile(r'(/a)?/changes/([^/]+)?$'),
'changes/abandon':
re.compile(r'(/a)?/changes/.*/abandon'),
'changes/comments':
re.compile(r'(/a)?/changes/.*/comments'),
'changes/detail':
re.compile(r'(/a)?/changes/.*/detail'),
'changes/edit':
re.compile(r'(/a)?/changes/.*/edit'),
'changes/message':
re.compile(r'(/a)?/changes/.*/message'),
'changes/restore':
re.compile(r'(/a)?/changes/.*/restore'),
'changes/reviewers':
re.compile(r'(/a)?/changes/.*/reviewers/.*'),
'changes/revisions/commit':
re.compile(r'(/a)?/changes/.*/revisions/.*/commit'),
'changes/revisions/review':
re.compile(r'(/a)?/changes/.*/revisions/.*/review'),
'changes/submit':
re.compile(r'(/a)?/changes/.*/submit'),
'projects/branches':
re.compile(r'(/a)?/projects/.*/branches/.*'),
}
KNOWN_HTTP_ARGS = {
'ALL_REVISIONS',
'CURRENT_COMMIT',
'CURRENT_REVISION',
'DETAILED_ACCOUNTS',
'LABELS',
}
GIT_VERSION_RE = re.compile(
r'git version (\d)\.(\d{0,2})\.(\d{0,2})'
)
KNOWN_SUBCOMMAND_ARGS = {
'cc',
'hashtag',
'l=Auto-Submit+1',
'l=Code-Review+1',
'l=Code-Review+2',
'l=Commit-Queue+1',
'l=Commit-Queue+2',
'label',
'm',
'notify=ALL',
'notify=NONE',
'private',
'r',
'ready',
'topic',
'wip'
}
def get_python_version():
return '{v.major}.{v.minor}.{v.micro}'.format(v=sys.version_info)
def get_git_version():
p = subprocess2.Popen(
['git', '--version'],
stdout=subprocess2.PIPE, stderr=subprocess2.PIPE)
stdout, _ = p.communicate()
match = GIT_VERSION_RE.match(stdout)
if not match:
return None
return '%s.%s.%s' % match.groups()
def return_code_from_exception(exception):
if exception is None:
return 0
if isinstance(exception[1], SystemExit):
return exception[1].code
return 1
def seconds_to_weeks(duration):
return int(duration) >> 19
def extract_known_subcommand_args(args):
known_args = []
for arg in args:
if arg in KNOWN_SUBCOMMAND_ARGS:
known_args.append(arg)
else:
arg = arg.split('=')[0]
if arg in KNOWN_SUBCOMMAND_ARGS:
known_args.append(arg)
return sorted(known_args)
def extract_http_metrics(request_uri, method, status, response_time):
http_metrics = {
'status': status,
'response_time': response_time,
}
if method in KNOWN_HTTP_METHODS:
http_metrics['method'] = method
parsed_url = urlparse.urlparse(request_uri)
if parsed_url.netloc in KNOWN_HTTP_HOSTS:
http_metrics['host'] = parsed_url.netloc
for name, path_re in KNOWN_HTTP_PATHS.iteritems():
if path_re.match(parsed_url.path):
http_metrics['path'] = name
break
parsed_query = urlparse.parse_qs(parsed_url.query)
# Collect o-parameters from the request.
args = [
arg for arg in parsed_query.get('o', [])
if arg in KNOWN_HTTP_ARGS
]
if args:
http_metrics['arguments'] = args
return http_metrics
def get_repo_timestamp(path_to_repo):
# Get the upstream for the current branch. If we're not in a branch, fallback
try:
upstream = scm.GIT.GetUpstreamBranch(path_to_repo) or 'HEAD'
except subprocess2.CalledProcessError:
upstream = 'HEAD'
p = subprocess2.Popen(
['git', '-C', path_to_repo, 'log', '-n1', upstream, '--format=%at'],
stdout=subprocess2.PIPE, stderr=subprocess2.PIPE)
stdout, _ = p.communicate()
if p.returncode != 0:
return None
return seconds_to_weeks(stdout.strip())
def print_boxed_text(out, min_width, lines):
[EW, NS, SE, SW, NE, NW] = list('=|++++')
width = max(min_width, max(len(line) for line in lines))
out(SE + EW * (width + 2) + SW + '\n')
for line in lines:
out('%s %-*s %s\n' % (NS, width, line, NS))
out(NE + EW * (width + 2) + NW + '\n')
def print_notice(countdown):
lines = list(get_notice_countdown_header(countdown))
lines.append('')
lines += list(get_notice_footer())
print_boxed_text(sys.stderr.write, 49, lines)
def print_version_change(config_version):
lines = list(get_notice_version_change_header())
for version in xrange(config_version + 1, CURRENT_VERSION + 1):
lines.append('')
lines += list(get_change_notice(version))
print_boxed_text(sys.stderr.write, 49, lines)
| true | true |
f71ec570609d44a983cf9c03c4b5548e2be4e2ce | 533 | py | Python | Python Programs/string-reverse-words-zoho.py | muhammad-masood-ur-rehman/Skillrack | 71a25417c89d0efab40ee6229ccd758b26ae4312 | [
"CC0-1.0"
] | 2 | 2021-06-26T21:50:59.000Z | 2021-09-18T04:55:51.000Z | Python Programs/string-reverse-words-zoho.py | muhammad-masood-ur-rehman/Skillrack | 71a25417c89d0efab40ee6229ccd758b26ae4312 | [
"CC0-1.0"
] | null | null | null | Python Programs/string-reverse-words-zoho.py | muhammad-masood-ur-rehman/Skillrack | 71a25417c89d0efab40ee6229ccd758b26ae4312 | [
"CC0-1.0"
] | null | null | null | String - Reverse Words [ZOHO]
A string S is passed as the input. The program must reverse the order of the words in the string and print them as the output.
Input Format:
The first line will contain S.
Output Format:
The first line will contain the words in the string S in the reverse order.
Boundary Conditions:
Length of S is from 5 to 100.
Example Input/Output 1:
Input:
Today is Friday
Output:
Friday is Today
Example Input/Output 2:
Input:
five six ten eleven
Output:
eleven ten six five
# Read the sentence from stdin, then print its words in reverse order.
tokens = input().split()
print(*tokens[::-1])
| 25.380952 | 126 | 0.75985 | String - Reverse Words [ZOHO]
A string S is passed as the input. The program must reverse the order of the words in the string and print them as the output.
Input Format:
The first line will contain S.
Output Format:
The first line will contain the words in the string S in the reverse order.
Boundary Conditions:
Length of S is from 5 to 100.
Example Input/Output 1:
Input:
Today is Friday
Output:
Friday is Today
Example Input/Output 2:
Input:
five six ten eleven
Output:
eleven ten six five
s=input().split();s=s[::-1]
print(*s)
| false | true |
f71ec5dba68128c31f33297d8579804be932cce8 | 1,874 | py | Python | trisicell/pp/_binary.py | faridrashidi/trisicell | 4db89edd44c03ccb6c7d3477beff0079c3ff8035 | [
"BSD-3-Clause"
] | 2 | 2021-07-02T13:53:15.000Z | 2021-11-16T03:14:36.000Z | trisicell/pp/_binary.py | faridrashidi/trisicell | 4db89edd44c03ccb6c7d3477beff0079c3ff8035 | [
"BSD-3-Clause"
] | 58 | 2021-06-14T17:14:39.000Z | 2022-03-11T19:32:54.000Z | trisicell/pp/_binary.py | faridrashidi/trisicell | 4db89edd44c03ccb6c7d3477beff0079c3ff8035 | [
"BSD-3-Clause"
] | null | null | null | import trisicell as tsc
def binarym_filter_private_mutations(df):
    """Drop, in place, mutations (columns) whose column sum equals 1."""
    private = df.columns[df.sum() == 1]
    df.drop(private, axis=1, inplace=True)
def binarym_filter_clonal_mutations(df):
    """Drop, in place, mutations (columns) that are 1 in every cell."""
    n_cells = df.shape[0]
    het_counts = (df == 1).sum()
    clonal = het_counts[het_counts == n_cells].index
    df.drop(clonal, axis=1, inplace=True)
def binarym_filter_nonsense_mutations(df, alt_in=2, ref_in=1):
    """Drop, in place, mutations (columns) without enough support.

    A column is kept when it has at least ``alt_in`` entries equal to 1
    and, in addition, at least one of the following holds: it has at least
    ``ref_in`` entries equal to 0, it is 1 in every cell, or it has no
    fewer 1 entries than unknown (3) entries.
    """
    n_cells = df.shape[0]
    n_alt = (df == 1).sum()
    n_ref = (df == 0).sum()
    n_unknown = (df == 3).sum()
    keep = (n_alt >= alt_in) & (
        (n_ref >= ref_in) | (n_alt == n_cells) | (n_alt >= n_unknown)
    )
    df.drop(df.columns[~keep], axis=1, inplace=True)
def binarym_statistics(df):
    """Log the dimensions and REF/HET/UNKNOWN composition of a genotype matrix."""
    total = df.shape[0] * df.shape[1]
    n_ref = (df == 0).sum().sum()
    n_het = (df == 1).sum().sum()
    n_unknown = (df == 3).sum().sum()
    tsc.logg.info(f"size = {df.shape[0]} × {df.shape[1]}")
    tsc.logg.info(f" REF = {n_ref:6d} ({100*n_ref/total:2.1f}%)")
    tsc.logg.info(f" HET = {n_het:6d} ({100*n_het/total:2.1f}%)")
    tsc.logg.info(f" UNKNOWN = {n_unknown:6d} ({100*n_unknown/total:2.1f}%)")
def consensus_combine(df):
    """Combine replicate cells (rows) that share a name prefix before ``_``.

    Row labels of the form ``{Cell}_{ID}`` are grouped by ``{Cell}``; the
    genotype of each group is the element-wise product of its members, so
    a mutation survives only if it is present in every replicate. Intended
    to run prior to Trisicell-Cons.

    Parameters
    ----------
    df : :class:`pandas.DataFrame`
        The input genotype matrix in conflict-free format.

    Returns
    -------
    :class:`pandas.DataFrame`
        The combined genotype matrix, indexed by the common prefixes.
    """
    prefixes = df.index.str.split("_").str[0]
    combined = df.groupby(prefixes).transform("prod")
    return combined.groupby(combined.index.str.split("_").str[0]).first()
| 27.558824 | 80 | 0.524546 | import trisicell as tsc
def binarym_filter_private_mutations(df):
df.drop(df.columns[df.sum() == 1], axis=1, inplace=True)
def binarym_filter_clonal_mutations(df):
x = (df == 1).sum()
x = x[x == df.shape[0]]
df.drop(x.index, axis=1, inplace=True)
def binarym_filter_nonsense_mutations(df, alt_in=2, ref_in=1):
df.drop(
df.columns[
~(
((df == 1).sum() >= alt_in)
& (
((df == 0).sum() >= ref_in)
| ((df == 1).sum() == df.shape[0])
| ((df == 1).sum() >= (df == 3).sum())
)
)
],
axis=1,
inplace=True,
)
def binarym_statistics(df):
t = df.shape[0] * df.shape[1]
a = (df == 0).sum().sum()
b = (df == 1).sum().sum()
d = (df == 3).sum().sum()
tsc.logg.info(f"size = {df.shape[0]} × {df.shape[1]}")
tsc.logg.info(f" REF = {a:6d} ({100*a/t:2.1f}%)")
tsc.logg.info(f" HET = {b:6d} ({100*b/t:2.1f}%)")
tsc.logg.info(f" UNKNOWN = {d:6d} ({100*d/t:2.1f}%)")
def consensus_combine(df):
df2 = df.groupby(df.index.str.split("_").str[0]).transform("prod")
df2 = df2.groupby(df2.index.str.split("_").str[0]).first()
return df2
| true | true |
f71ec6103355933855b1b00050428c09848264a0 | 1,522 | py | Python | setup.py | Global-Green-Growth-Institute/GraphModels | 6a1c1a545df24daf2f7571fc6be8d47bece55c55 | [
"MIT"
] | null | null | null | setup.py | Global-Green-Growth-Institute/GraphModels | 6a1c1a545df24daf2f7571fc6be8d47bece55c55 | [
"MIT"
] | null | null | null | setup.py | Global-Green-Growth-Institute/GraphModels | 6a1c1a545df24daf2f7571fc6be8d47bece55c55 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
# Use the README as the PyPI long description.
with open('README.md', encoding='utf-8') as f:
    long_description = f.read()
# Build install_requires from requirements.txt, stripping any '==version'
# pins so only package names remain.
with open('requirements.txt') as f:
    required = f.read().splitlines()
    required = [r.split('==')[0] for r in required]
setup(
    name='ggmodel_dev',
    version='0.0.25',
    description='A Python package for creating graphical models',
    url='https://github.com/Global-Green-Growth-Institute/GraphModels',
    author='Simon Zabrocki',
    author_email='simon.zabrocki@gmail.com',
    license='BSD 2-clause',
    long_description=long_description,
    long_description_content_type='text/markdown',
    # Packages are listed explicitly (find_packages is imported but unused).
    packages=['ggmodel_dev', 'ggmodel_dev.models', 'ggmodel_dev.models.water', 'ggmodel_dev.models.landuse', 'ggmodel_dev.models.greengrowth', 'ggmodel_dev.models.transport', 'ggmodel_dev.models.energy', 'ggmodel_dev.models.material'],
    package_dir={'ggmodel_dev': 'ggmodel_dev/'},
    install_requires=required,
    classifiers=[
        'Development Status :: 1 - Planning',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 3.6',
    ],
    # Ship the per-model JSON data files alongside the code.
    package_data={'ggmodel_dev.models.water': ['*.json'], 'ggmodel_dev.models.landuse': ['*.json'], 'ggmodel_dev.models.transport': ['*.json'], 'ggmodel_dev.models.greengrowth': ['*.json'], 'ggmodel_dev.models.energy': ['*.json'], 'ggmodel_dev.models.material': ['*.json']},
    include_package_data=True,
) | 47.5625 | 274 | 0.685283 | from setuptools import setup, find_packages
with open('README.md', encoding='utf-8') as f:
long_description = f.read()
with open('requirements.txt') as f:
required = f.read().splitlines()
required = [r.split('==')[0] for r in required]
setup(
name='ggmodel_dev',
version='0.0.25',
description='A Python package for creating graphical models',
url='https://github.com/Global-Green-Growth-Institute/GraphModels',
author='Simon Zabrocki',
author_email='simon.zabrocki@gmail.com',
license='BSD 2-clause',
long_description=long_description,
long_description_content_type='text/markdown',
packages=['ggmodel_dev', 'ggmodel_dev.models', 'ggmodel_dev.models.water', 'ggmodel_dev.models.landuse', 'ggmodel_dev.models.greengrowth', 'ggmodel_dev.models.transport', 'ggmodel_dev.models.energy', 'ggmodel_dev.models.material'],
package_dir={'ggmodel_dev': 'ggmodel_dev/'},
install_requires=required,
classifiers=[
'Development Status :: 1 - Planning',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.6',
],
package_data={'ggmodel_dev.models.water': ['*.json'], 'ggmodel_dev.models.landuse': ['*.json'], 'ggmodel_dev.models.transport': ['*.json'], 'ggmodel_dev.models.greengrowth': ['*.json'], 'ggmodel_dev.models.energy': ['*.json'], 'ggmodel_dev.models.material': ['*.json']},
include_package_data=True,
) | true | true |
f71ec734137c6570fd65d5935dee186b8ceb089f | 95 | py | Python | lecture8/dbexample/dbexample2/apps.py | CSUChico-CINS465/CSCI465-Fall2016-Lecture-Examples | 332df2821aef74c6522c53278e28ceb27cbe2fe6 | [
"MIT"
] | null | null | null | lecture8/dbexample/dbexample2/apps.py | CSUChico-CINS465/CSCI465-Fall2016-Lecture-Examples | 332df2821aef74c6522c53278e28ceb27cbe2fe6 | [
"MIT"
] | null | null | null | lecture8/dbexample/dbexample2/apps.py | CSUChico-CINS465/CSCI465-Fall2016-Lecture-Examples | 332df2821aef74c6522c53278e28ceb27cbe2fe6 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class Dbexample2Config(AppConfig):
name = 'dbexample2'
| 15.833333 | 34 | 0.768421 | from django.apps import AppConfig
class Dbexample2Config(AppConfig):
name = 'dbexample2'
| true | true |
f71ec7380a07ff33e88fe3a83f045f17adce3722 | 112,841 | py | Python | subversion/tests/cmdline/switch_tests.py | saurabhacellere/subversion | 6e7a18e22d4d4cbfad917e18b784adf1912aa67f | [
"Apache-2.0"
] | null | null | null | subversion/tests/cmdline/switch_tests.py | saurabhacellere/subversion | 6e7a18e22d4d4cbfad917e18b784adf1912aa67f | [
"Apache-2.0"
] | null | null | null | subversion/tests/cmdline/switch_tests.py | saurabhacellere/subversion | 6e7a18e22d4d4cbfad917e18b784adf1912aa67f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# switch_tests.py: testing `svn switch'.
#
# Subversion is a tool for revision control.
# See http://subversion.apache.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
# General modules
import shutil, re, os
# Our testing module
import svntest
from svntest import verify, actions, main, deeptrees
# (abbreviation)
Skip = svntest.testcase.Skip_deco
SkipUnless = svntest.testcase.SkipUnless_deco
XFail = svntest.testcase.XFail_deco
Issues = svntest.testcase.Issues_deco
Issue = svntest.testcase.Issue_deco
Wimp = svntest.testcase.Wimp_deco
Item = svntest.wc.StateItem
from svntest.main import SVN_PROP_MERGEINFO, server_has_mergeinfo
from svntest.deeptrees import do_routine_switching, commit_routine_switching, \
get_routine_disk_state, get_routine_status_state
######################################################################
# Tests
#
#----------------------------------------------------------------------
def routine_switching(sbox):
  "test some basic switching operations"
  # A pristine (read-only) working copy is enough: the helper both
  # creates and verifies the standard switched-path layout.
  sbox.build(read_only=True)
  do_routine_switching(sbox.wc_dir, sbox.repo_url, 1)
#----------------------------------------------------------------------
def commit_switched_things(sbox):
  "commits after some basic switching operations"
  sbox.build()
  # Arrange the usual switched layout without verifying it, then make
  # sure commits issued from within the switched paths work (verified).
  do_routine_switching(sbox.wc_dir, sbox.repo_url, 0)
  commit_routine_switching(sbox.wc_dir, 1)
#----------------------------------------------------------------------
def full_update(sbox):
  "update wc that contains switched things"
  # Scenario: set up switched paths in wc_dir, commit changes from a
  # duplicate working copy, then verify that a full 'svn update' of
  # wc_dir pulls the changes into both the switched and unswitched
  # locations.  (The previously declared per-path local variables were
  # unused and have been dropped; the expected trees address nodes by
  # their relative-path keys.)
  sbox.build()
  wc_dir = sbox.wc_dir
  # Setup some switched things (don't bother verifying)
  do_routine_switching(wc_dir, sbox.repo_url, 0)
  # Copy wc_dir to a backup location
  wc_backup = sbox.add_wc_path('backup')
  svntest.actions.duplicate_dir(wc_dir, wc_backup)
  # Commit some stuff from the backup (don't bother verifying), leaving
  # wc_dir out of date.
  commit_routine_switching(wc_backup, 0)
  # Create expected output tree for an update of wc_dir.  'iota' is
  # switched to A/D/gamma and 'A/B' to A/D/G, so they receive the same
  # changes as their switch sources.
  expected_output = svntest.wc.State(wc_dir, {
    'iota' : Item(status='U '),
    'A/D/gamma' : Item(status='U '),
    'A/B/pi' : Item(status='U '),
    'A/B/Z' : Item(status='A '),
    'A/B/Z/zeta' : Item(status='A '),
    'A/D/G/pi' : Item(status='U '),
    'A/D/G/Z' : Item(status='A '),
    'A/D/G/Z/zeta' : Item(status='A '),
    })
  # Create expected disk tree for the update
  expected_disk = get_routine_disk_state(wc_dir)
  expected_disk.tweak('iota', contents="This is the file 'gamma'.\napple")
  expected_disk.tweak('A/D/gamma', contents="This is the file 'gamma'.\napple")
  expected_disk.tweak('A/B/pi', contents="This is the file 'pi'.\nmelon")
  expected_disk.tweak('A/D/G/pi', contents="This is the file 'pi'.\nmelon")
  expected_disk.add({
    'A/B/Z' : Item(),
    'A/B/Z/zeta' : Item(contents="This is the file 'zeta'.\n"),
    'A/D/G/Z' : Item(),
    'A/D/G/Z/zeta' : Item(contents="This is the file 'zeta'.\n"),
    })
  # Create expected status tree for the update.
  expected_status = get_routine_status_state(wc_dir)
  expected_status.tweak(wc_rev=2)
  expected_status.add({
    'A/D/G/Z' : Item(status=' ', wc_rev=2),
    'A/D/G/Z/zeta' : Item(status=' ', wc_rev=2),
    'A/B/Z' : Item(status=' ', wc_rev=2),
    'A/B/Z/zeta' : Item(status=' ', wc_rev=2),
    })
  expected_status.tweak('iota', 'A/B', switched='S')
  svntest.actions.run_and_verify_update(wc_dir,
                                        expected_output,
                                        expected_disk,
                                        expected_status)
#----------------------------------------------------------------------
def full_rev_update(sbox):
  "reverse update wc that contains switched things"
  # Scenario: commit changes through the switched paths, update to HEAD,
  # then verify that 'svn update -r 1' restores the whole working copy
  # (switched paths included) to its pre-commit state.  (The previously
  # declared per-path local variables were unused and have been dropped.)
  sbox.build()
  wc_dir = sbox.wc_dir
  # Setup some switched things (don't bother verifying)
  do_routine_switching(wc_dir, sbox.repo_url, 0)
  # Commit some stuff (don't bother verifying)
  commit_routine_switching(wc_dir, 0)
  # Update to HEAD (tested elsewhere)
  svntest.main.run_svn(None, 'up', wc_dir)
  # Now, reverse update, back to the pre-commit state.
  expected_output = svntest.wc.State(wc_dir, {
    'iota' : Item(status='U '),
    'A/D/gamma' : Item(status='U '),
    'A/B/pi' : Item(status='U '),
    'A/B/Z' : Item(status='D '),
    'A/D/G/pi' : Item(status='U '),
    'A/D/G/Z' : Item(status='D '),
    })
  # Create expected disk tree
  expected_disk = get_routine_disk_state(wc_dir)
  # Create expected status
  expected_status = get_routine_status_state(wc_dir)
  expected_status.tweak('iota', 'A/B', switched='S')
  svntest.actions.run_and_verify_update(wc_dir,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], True,
                                        '-r', '1', wc_dir)
#----------------------------------------------------------------------
def update_switched_things(sbox):
  "update switched wc things to HEAD"
  # Scenario: commit changes from a duplicate working copy, then run
  # 'svn update' on only the two switched targets (A/B and iota) and
  # verify that just those targets pick up the committed changes.
  sbox.build()
  wc_dir = sbox.wc_dir
  # Setup some switched things (don't bother verifying)
  do_routine_switching(wc_dir, sbox.repo_url, 0)
  # Copy wc_dir to a backup location
  wc_backup = sbox.add_wc_path('backup')
  svntest.actions.duplicate_dir(wc_dir, wc_backup)
  # Commit some stuff (don't bother verifying)
  commit_routine_switching(wc_backup, 0)
  # Some convenient path variables
  iota_path = sbox.ospath('iota')
  B_path = sbox.ospath('A/B')
  # Create expected output tree for an update of wc_backup.
  expected_output = svntest.wc.State(wc_dir, {
    'iota' : Item(status='U '),
    'A/B/pi' : Item(status='U '),
    'A/B/Z' : Item(status='A '),
    'A/B/Z/zeta' : Item(status='A '),
    })
  # Create expected disk tree for the update.  Only the updated targets
  # change; the rest of the working copy stays at its old state.
  expected_disk = get_routine_disk_state(wc_dir)
  expected_disk.tweak('iota', contents="This is the file 'gamma'.\napple")
  expected_disk.tweak('A/B/pi', contents="This is the file 'pi'.\nmelon")
  expected_disk.add({
    'A/B/Z' : Item(),
    'A/B/Z/zeta' : Item("This is the file 'zeta'.\n"),
    })
  # Create expected status tree for the update.
  expected_status = get_routine_status_state(wc_dir)
  expected_status.tweak('iota', 'A/B', switched='S')
  expected_status.tweak('A/B', 'A/B/pi', 'A/B/rho', 'A/B/tau', 'iota',
                        wc_rev=2)
  expected_status.add({
    'A/B/Z' : Item(status=' ', wc_rev=2),
    'A/B/Z/zeta' : Item(status=' ', wc_rev=2),
    })
  svntest.actions.run_and_verify_update(wc_dir,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], False,
                                        B_path,
                                        iota_path)
#----------------------------------------------------------------------
def rev_update_switched_things(sbox):
  "reverse update switched wc things to an older rev"
  # Scenario: commit through the switched paths, update everything to
  # HEAD, then 'svn update -r 1' only the switched targets (A/B and
  # iota) and check they roll back while the rest of the WC stays at r2.
  sbox.build()
  wc_dir = sbox.wc_dir
  # Setup some switched things (don't bother verifying)
  do_routine_switching(wc_dir, sbox.repo_url, 0)
  # Commit some stuff (don't bother verifying)
  commit_routine_switching(wc_dir, 0)
  # Some convenient path variables
  iota_path = sbox.ospath('iota')
  B_path = sbox.ospath('A/B')
  # Update to HEAD (tested elsewhere)
  svntest.main.run_svn(None, 'up', wc_dir)
  # Now, reverse update, back to the pre-commit state.
  expected_output = svntest.wc.State(wc_dir, {
    'iota' : Item(status='U '),
    'A/B/pi' : Item(status='U '),
    'A/B/Z' : Item(status='D '),
    })
  # Create expected disk tree.  A/D/* keeps the committed (r2) content
  # because only the switched targets are rolled back.
  expected_disk = get_routine_disk_state(wc_dir)
  expected_disk.tweak('A/D/gamma', contents="This is the file 'gamma'.\napple")
  expected_disk.tweak('A/D/G/pi', contents="This is the file 'pi'.\nmelon")
  expected_disk.add({
    'A/D/G/Z' : Item(),
    'A/D/G/Z/zeta' : Item("This is the file 'zeta'.\n"),
    })
  # Create expected status tree for the update.
  expected_status = get_routine_status_state(wc_dir)
  expected_status.tweak(wc_rev=2)
  expected_status.tweak('iota', 'A/B', switched='S')
  expected_status.tweak('A/B', 'A/B/pi', 'A/B/rho', 'A/B/tau', 'iota',
                        wc_rev=1)
  expected_status.add({
    'A/D/G/Z' : Item(status=' ', wc_rev=2),
    'A/D/G/Z/zeta' : Item(status=' ', wc_rev=2),
    })
  svntest.actions.run_and_verify_update(wc_dir,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], True,
                                        '-r', '1',
                                        B_path,
                                        iota_path)
#----------------------------------------------------------------------
def log_switched_file(sbox):
  "show logs for a switched file"
  sbox.build()
  # Arrange the standard switched layout (unverified): 'iota' ends up
  # switched to another repository path.
  do_routine_switching(sbox.wc_dir, sbox.repo_url, 0)
  # Set a property on the switched file 'iota' and commit it.
  iota_path = sbox.ospath('iota')
  svntest.main.run_svn(None, 'ps', 'x', 'x', iota_path)
  svntest.main.run_svn(None, 'ci', '-m', 'set prop on switched iota',
                       iota_path)
  # 'svn log' on the switched file must mention the commit we just made.
  exit_code, output, error = svntest.main.run_svn(None, 'log', iota_path)
  if not any("set prop on switched iota" in line for line in output):
    raise svntest.Failure
#----------------------------------------------------------------------
def delete_subdir(sbox):
  "switch that deletes a sub-directory"
  # Scenario: copy A to A2 (r2), delete A2/B/F in the repository (r3),
  # then switch the local A to A2 and verify that A/B/F is deleted
  # locally as part of the switch.
  sbox.build()
  wc_dir = sbox.wc_dir
  A_path = sbox.ospath('A')
  A_url = sbox.repo_url + '/A'
  A2_url = sbox.repo_url + '/A2'
  A2_B_F_url = sbox.repo_url + '/A2/B/F'
  svntest.actions.run_and_verify_svn(['Committing transaction...\n',
                                      'Committed revision 2.\n'], [],
                                     'cp', '-m', 'make copy', A_url, A2_url)
  svntest.actions.run_and_verify_svn(['Committing transaction...\n',
                                      'Committed revision 3.\n'], [],
                                     'rm', '-m', 'delete subdir', A2_B_F_url)
  expected_output = svntest.wc.State(wc_dir, {
    'A/B/F' : Item(status='D '),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_disk.remove('A/B/F')
  expected_status = svntest.actions.get_virginal_state(wc_dir, 3)
  expected_status.tweak('A', switched='S')
  expected_status.remove('A/B/F')
  # Only the switch target was brought to r3; '' and 'iota' stay at r1.
  expected_status.tweak('', 'iota', wc_rev=1)
  # Used to fail with a 'directory not locked' error for A/B/F
  svntest.actions.run_and_verify_switch(wc_dir, A_path, A2_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], False,
                                        '--ignore-ancestry')
#----------------------------------------------------------------------
# Issue 1532: Switch a file to a dir: can't switch it back to the file
@XFail()
@Issue(1532)
def file_dir_file(sbox):
  "switch a file to a dir and back to the file"
  # Known failure (issue #1532): after switching the file 'iota' to a
  # directory URL, switching it back to the file URL does not work.
  sbox.build(read_only = True)
  wc_dir = sbox.wc_dir
  file_path = sbox.ospath('iota')
  file_url = sbox.repo_url + '/iota'
  dir_url = sbox.repo_url + '/A/C'
  # Switch the file to a directory: this direction succeeds.
  svntest.actions.run_and_verify_svn(None, [], 'switch',
                                     '--ignore-ancestry', dir_url, file_path)
  if not os.path.isdir(file_path):
    raise svntest.Failure
  # The reason the following switch currently fails is that the node
  # is determined to be a 'root', because it is switched against its parent.
  # In this specific case the switch editor is designed to be rooted on the node
  # itself instead of its ancestor. If you would use sbox.ospath('A') for
  # file_path the switch works both ways.
  svntest.actions.run_and_verify_svn(None, [], 'switch',
                                     '--ignore-ancestry', file_url, file_path)
  if not os.path.isfile(file_path):
    raise svntest.Failure
#----------------------------------------------------------------------
# Issue 1751: "svn switch --non-recursive" does not switch existing files,
# and generates the wrong URL for new files.
def nonrecursive_switching(sbox):
  "non-recursive switch"
  # Regression test for issue #1751: 'svn switch -N' must switch the
  # existing files of the target and record correct URLs for files that
  # the switch adds, while leaving subdirectories unswitched.
  sbox.build()
  wc1_dir = sbox.wc_dir
  wc2_dir = os.path.join(wc1_dir, 'wc2')
  # "Trunk" will be the existing dir "A/", with existing file "mu".
  # "Branch" will be the new dir "branch/version1/", with added file "newfile".
  # "wc1" will hold the whole repository (including trunk and branch).
  # "wc2" will hold the "trunk" and then be switched to the "branch".
  # It is irrelevant that wc2 is located on disk as a sub-directory of wc1.
  trunk_url = sbox.repo_url + '/A'
  branch_url = sbox.repo_url + '/branch'
  version1_url = branch_url + '/version1'
  wc1_new_file = os.path.join(wc1_dir, 'branch', 'version1', 'newfile')
  wc2_new_file = os.path.join(wc2_dir, 'newfile')
  wc2_mu_file = os.path.join(wc2_dir, 'mu')
  wc2_B_dir = os.path.join(wc2_dir, 'B')
  wc2_C_dir = os.path.join(wc2_dir, 'C')
  wc2_D_dir = os.path.join(wc2_dir, 'D')
  # Check out the trunk as "wc2"
  svntest.main.run_svn(None, 'co', trunk_url, wc2_dir)
  # Make a branch, and add a new file, in "wc_dir" and repository
  svntest.main.run_svn(None,
                       'mkdir', '-m', '', branch_url)
  svntest.main.run_svn(None,
                       'cp', '-m', '', trunk_url, version1_url)
  svntest.main.run_svn(None,
                       'up', wc1_dir)
  svntest.main.file_append(wc1_new_file, "This is the file 'newfile'.\n")
  svntest.main.run_svn(None, 'add', wc1_new_file)
  sbox.simple_commit()
  # Try to switch "wc2" to the branch (non-recursively)
  svntest.actions.run_and_verify_svn(None, [], 'switch', '-N',
                                     '--ignore-ancestry', version1_url, wc2_dir)
  # Check the URLs of the (not switched) directories.
  expected_infos = [
      { 'URL' : '.*/A/B$' },
      { 'URL' : '.*/A/C$' },
      { 'URL' : '.*/A/D$' },
    ]
  svntest.actions.run_and_verify_info(expected_infos,
                                      wc2_B_dir, wc2_C_dir, wc2_D_dir)
  # Check the URLs of the switched files.
  # ("svn status -u" might be a better check: it fails when newfile's URL
  # is bad, and shows "S" when mu's URL is wrong.)
  # mu: not switched
  expected_infos = [
      { 'URL' : '.*/branch/version1/mu$' },
      { 'URL' : '.*/branch/version1/newfile$' }, # newfile: wrong URL
    ]
  svntest.actions.run_and_verify_info(expected_infos,
                                      wc2_mu_file, wc2_new_file)
#----------------------------------------------------------------------
def failed_anchor_is_target(sbox):
  "anchor=target, try to replace a local-mod file"
  # Scenario: switching A/D/H (which has a locally-modified file 'psi')
  # to A/D/G (where 'psi' is a directory) must raise a tree conflict,
  # leave the correct switch URL on A/D/H, and be completable once the
  # conflict is resolved.
  sbox.build()
  wc_dir = sbox.wc_dir
  # Set up a switch from dir H, containing locally-modified file 'psi',
  # to dir G, containing a directory 'psi'. Expect a tree conflict.
  # Make a directory 'G/psi' in the repository.
  G_url = sbox.repo_url + '/A/D/G'
  G_psi_url = G_url + '/psi'
  svntest.actions.run_and_verify_svn(['Committing transaction...\n',
                                      'Committed revision 2.\n'], [],
                                     'mkdir', '-m', 'log msg', G_psi_url)
  # Modify the file 'H/psi' locally.
  H_path = sbox.ospath('A/D/H')
  psi_path = os.path.join(H_path, 'psi')
  svntest.main.file_append(psi_path, "more text")
  # This switch raises a tree conflict on 'psi', because of the local mods.
  svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [],
                                     'switch', '--ignore-ancestry',
                                     G_url, H_path)
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.tweak('A/D/H', switched='S', wc_rev=2)
  expected_status.tweak('A/D/H/psi', status='R ', copied='+',
                        wc_rev='-', treeconflict='C')
  expected_status.remove('A/D/H/chi', 'A/D/H/omega')
  expected_status.add({
    'A/D/H/pi' : Item(status=' ', wc_rev=2),
    'A/D/H/tau' : Item(status=' ', wc_rev=2),
    'A/D/H/rho' : Item(status=' ', wc_rev=2),
    })
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
  # There was a bug whereby the failed switch left the wrong URL in
  # the target directory H. Check for that.
  expected_infos = [
    { 'URL' : '.*' + G_url + '$' },
  ]
  svntest.actions.run_and_verify_info(expected_infos, H_path)
  # Resolve tree conflict at psi.
  svntest.actions.run_and_verify_resolved([psi_path])
  # The switch should now be complete.
  ### Instead of "treeconflict=None" which means "don't check", we should
  # check "treeconflict=' '" but the test suite doesn't do the right thing.
  expected_status.tweak('A/D/H/psi', treeconflict=None)
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
# Issue #1826 - svn switch temporarily drops invalid URLs into the entries
# files (which become not-temporary if the switch fails).
def bad_intermediate_urls(sbox):
  "bad intermediate urls in use"
  # Regression test for issue #1826: a switch whose incoming tree
  # collides with an unversioned local file (A/Z here) must record the
  # correct URL on intermediate directories even though the switch hits
  # a tree conflict, and the conflict must be recoverable.
  sbox.build()
  wc_dir = sbox.wc_dir
  url = sbox.repo_url
  A = sbox.ospath('A')
  A_Z = sbox.ospath('A/Z')
  url_A_C = url + '/A/C'
  url_A_C_A = url + '/A/C/A'
  url_A_C_A_Z = url + '/A/C/A/Z'
  # We'll be switching our working copy to (a modified) A/C in the Greek tree.
  # First, make an extra subdirectory in C to match one in the root, plus
  # another one inside of that.
  svntest.actions.run_and_verify_svn(['Committing transaction...\n',
                                      'Committed revision 2.\n'], [],
                                     'mkdir', '-m', 'log msg',
                                     url_A_C_A, url_A_C_A_Z)
  # Now, we'll drop a conflicting path under the root.
  svntest.main.file_append(A_Z, 'Look, Mom, a ... tree conflict.')
  #svntest.factory.make(sbox, """
  #  svn switch url/A/C wc_dir
  #  # svn info A
  #  # check that we can recover from the tree conflict
  #  rm A/Z
  #  svn up
  #  """)
  #exit(0)
  # svn switch url/A/C wc_dir
  expected_output = svntest.wc.State(wc_dir, {
    'A/mu' : Item(status='D '),
    'A/Z' : Item(status=' ', treeconflict='C'),
    'A/C' : Item(status='D '),
    'A/B' : Item(status='D '),
    'A/D' : Item(status='D '),
    'iota' : Item(status='D '),
  })
  expected_disk = svntest.main.greek_state.copy()
  expected_disk.remove('iota', 'A/B', 'A/B/E', 'A/B/E/beta', 'A/B/E/alpha',
    'A/B/F', 'A/B/lambda', 'A/D', 'A/D/G', 'A/D/G/rho', 'A/D/G/pi',
    'A/D/G/tau', 'A/D/H', 'A/D/H/psi', 'A/D/H/omega', 'A/D/H/chi',
    'A/D/gamma', 'A/mu', 'A/C')
  expected_disk.add({
    'A/Z' : Item(contents="Look, Mom, a ... tree conflict."),
  })
  expected_status = actions.get_virginal_state(wc_dir, 2)
  expected_status.remove('iota', 'A/B', 'A/B/E', 'A/B/E/beta', 'A/B/E/alpha',
    'A/B/F', 'A/B/lambda', 'A/D', 'A/D/G', 'A/D/G/rho', 'A/D/G/pi',
    'A/D/G/tau', 'A/D/H', 'A/D/H/psi', 'A/D/H/omega', 'A/D/H/chi',
    'A/D/gamma', 'A/mu', 'A/C')
  expected_status.add({
    # Obstructed node is currently turned into a delete to allow resolving.
    'A/Z' : Item(status='D ', treeconflict='C', wc_rev=2),
  })
  actions.run_and_verify_switch(wc_dir, wc_dir, url_A_C, expected_output,
    expected_disk, expected_status,
    [], False,
    '--ignore-ancestry')
  # However, the URL for wc/A should now reflect ^/A/C/A, not something else.
  expected_infos = [
    { 'URL' : '.*/A/C/A$' },
  ]
  svntest.actions.run_and_verify_info(expected_infos, A)
  # check that we can recover from the tree conflict
  # rm A/Z
  os.remove(A_Z)
  svntest.main.run_svn(None, 'revert', A_Z)
  # svn up
  expected_output = svntest.wc.State(wc_dir, {
  })
  expected_disk.tweak('A/Z', contents=None)
  expected_status.tweak(status=' ', wc_rev='2')
  expected_status.tweak('A/Z', treeconflict=None)
  actions.run_and_verify_update(wc_dir, expected_output, expected_disk,
    expected_status)
#----------------------------------------------------------------------
# Regression test for issue #1825: failed switch may corrupt
# working copy
@Issue(1825)
def obstructed_switch(sbox):
  "obstructed switch"
  # Regression test for issue #1825: a switch that would re-add a file
  # on top of an unversioned obstruction (the re-created 'alpha') must
  # flag a tree conflict instead of corrupting the working copy.
  #svntest.factory.make(sbox, """svn cp -m msgcopy url/A/B/E url/A/B/Esave
  #                              svn rm A/B/E/alpha
  #                              svn commit
  #                              echo "hello" >> A/B/E/alpha
  #                              svn switch url/A/B/Esave A/B/E
  #                              svn status
  #                              svn info A/B/E/alpha""")
  sbox.build()
  wc_dir = sbox.wc_dir
  url = sbox.repo_url
  A_B_E = sbox.ospath('A/B/E')
  A_B_E_alpha = sbox.ospath('A/B/E/alpha')
  url_A_B_E = url + '/A/B/E'
  url_A_B_Esave = url + '/A/B/Esave'
  # svn cp -m msgcopy url/A/B/E url/A/B/Esave
  expected_stdout = [
    'Committing transaction...\n',
    'Committed revision 2.\n',
  ]
  actions.run_and_verify_svn2(expected_stdout, [], 0, 'cp', '-m',
    'msgcopy', url_A_B_E, url_A_B_Esave)
  # svn rm A/B/E/alpha
  expected_stdout = ['D ' + A_B_E_alpha + '\n']
  actions.run_and_verify_svn2(expected_stdout, [], 0, 'rm',
    A_B_E_alpha)
  # svn commit
  expected_output = svntest.wc.State(wc_dir, {
    'A/B/E/alpha' : Item(verb='Deleting'),
  })
  expected_status = actions.get_virginal_state(wc_dir, 1)
  expected_status.remove('A/B/E/alpha')
  actions.run_and_verify_commit(wc_dir, expected_output, expected_status)
  # echo "hello" >> A/B/E/alpha
  # (re-create 'alpha' as an unversioned obstruction)
  main.file_append(A_B_E_alpha, 'hello')
  # svn switch url/A/B/Esave A/B/E
  expected_output = svntest.wc.State(wc_dir, {
    'A/B/E/alpha' : Item(status=' ', treeconflict='C'),
  })
  expected_disk = svntest.main.greek_state.copy()
  expected_disk.tweak('A/B/E/alpha', contents='hello')
  expected_status.add({
    'A/B/E/alpha' : Item(status='D ', treeconflict='C', wc_rev=3),
  })
  expected_status.tweak('A/B/E', wc_rev='3', switched='S')
  expected_status.tweak('A/B/E/beta', wc_rev='3')
  actions.run_and_verify_switch(wc_dir, A_B_E, url_A_B_Esave,
                                expected_output, expected_disk,
                                expected_status,
                                [], False, '--ignore-ancestry')
  # svn status
  expected_status.add({
    'A/B/Esave' : Item(status=' '),
    'A/B/Esave/beta' : Item(status=' '),
    'A/B/Esave/alpha' : Item(status=' '),
  })
  actions.run_and_verify_unquiet_status(wc_dir, expected_status)
  # svn info A/B/E/alpha
  expected_stdout = verify.RegexOutput(
    ".*local file unversioned, incoming file add upon switch",
    match_all=False)
  actions.run_and_verify_svn2(expected_stdout, [], 0, 'info',
    A_B_E_alpha)
#----------------------------------------------------------------------
# Issue 2353.
def commit_mods_below_switch(sbox):
  "commit with mods below switch"
  # Regression test for issue #2353: committing property modifications
  # on a switched directory (A/C -> A/B) together with a non-switched
  # one (A/D) must succeed.
  sbox.build()
  wc_dir = sbox.wc_dir
  C_path = sbox.ospath('A/C')
  B_url = sbox.repo_url + '/A/B'
  expected_output = svntest.wc.State(wc_dir, {
    'A/C/E' : Item(status='A '),
    'A/C/E/alpha' : Item(status='A '),
    'A/C/E/beta' : Item(status='A '),
    'A/C/F' : Item(status='A '),
    'A/C/lambda' : Item(status='A '),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_disk.add({
    'A/C/E' : Item(),
    'A/C/E/alpha' : Item(contents="This is the file 'alpha'.\n"),
    'A/C/E/beta' : Item(contents="This is the file 'beta'.\n"),
    'A/C/F' : Item(),
    'A/C/lambda' : Item(contents="This is the file 'lambda'.\n"),
    })
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.tweak('A/C', switched='S')
  expected_status.add({
    'A/C/E' : Item(status=' ', wc_rev=1),
    'A/C/E/alpha' : Item(status=' ', wc_rev=1),
    'A/C/E/beta' : Item(status=' ', wc_rev=1),
    'A/C/F' : Item(status=' ', wc_rev=1),
    'A/C/lambda' : Item(status=' ', wc_rev=1),
    })
  svntest.actions.run_and_verify_switch(wc_dir, C_path, B_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [],
                                        False, '--ignore-ancestry')
  # Set the same property on the switched dir and on an unswitched one.
  D_path = sbox.ospath('A/D')
  svntest.actions.run_and_verify_svn(None, [],
                                     'propset', 'x', 'x', C_path, D_path)
  expected_status.tweak('A/C', 'A/D', status=' M')
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
  expected_output = svntest.wc.State(wc_dir, {
    'A/C' : Item(verb='Sending'),
    'A/D' : Item(verb='Sending'),
    })
  expected_status.tweak('A/C', 'A/D', status=' ', wc_rev=2)
  # A/C erroneously classified as a wc root caused the commit to fail
  # with "'A/C/E' is missing or not locked"
  svntest.actions.run_and_verify_commit(wc_dir,
                                        expected_output, expected_status,
                                        [], C_path, D_path)
#----------------------------------------------------------------------
# Issue 2306.
def refresh_read_only_attribute(sbox):
  "refresh the WC file system read-only attribute "
  # Regression test for issue #2306: switching away from a branch where
  # svn:needs-lock is set on a file must clear the file's on-disk
  # read-only attribute.
  # This test will fail when run as root. Since that's normal
  # behavior, just skip the test.
  if os.name == 'posix':
    if os.geteuid() == 0:
      raise svntest.Skip('Test doesn\'t work as uid 0')
  sbox.build()
  wc_dir = sbox.wc_dir
  # Create a branch.
  url = sbox.repo_url + '/A'
  branch_url = sbox.repo_url + '/A-branch'
  svntest.actions.run_and_verify_svn(['Committing transaction...\n',
                                      'Committed revision 2.\n'], [],
                                     'cp', '-m', 'svn:needs-lock not set',
                                     url, branch_url)
  # Set the svn:needs-lock property on a file from the "trunk".
  A_path = sbox.ospath('A')
  mu_path = os.path.join(A_path, 'mu')
  svntest.actions.run_and_verify_svn(None, [],
                                     'ps', 'svn:needs-lock', '1', mu_path)
  # Commit the propset of svn:needs-lock.
  expected_output = svntest.wc.State(wc_dir, {
    'A/mu' : Item(verb='Sending'),
    })
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.tweak('A/mu', wc_rev=3)
  svntest.actions.run_and_verify_commit(wc_dir,
                                        expected_output, expected_status,
                                        [], mu_path)
  # The file on which svn:needs-lock was set is now expected to be read-only.
  if os.access(mu_path, os.W_OK):
    raise svntest.Failure("'%s' expected to be read-only after having had "
                          "its svn:needs-lock property set" % mu_path)
  # Switch to the branch with the WC state from before the propset of
  # svn:needs-lock.
  expected_output = svntest.wc.State(wc_dir, {
    'A/mu' : Item(status=' U'),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_status = svntest.actions.get_virginal_state(wc_dir, 3)
  expected_status.tweak('', wc_rev=1)
  expected_status.tweak('iota', wc_rev=1)
  expected_status.tweak('A', switched='S')
  svntest.actions.run_and_verify_switch(wc_dir, A_path, branch_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [],
                                        False, '--ignore-ancestry')
  # The file with we set svn:needs-lock on should now be writable, but
  # is still read-only!
  if not os.access(mu_path, os.W_OK):
    raise svntest.Failure("'%s' expected to be writable after being switched "
                          "to a branch on which its svn:needs-lock property "
                          "is not set" % mu_path)
# Check that switch can't change the repository root.
def switch_change_repos_root(sbox):
  "switch shouldn't allow changing repos root"
  # Verifies that 'svn switch' refuses both a URL in a non-existent
  # repository and a URL in a different (different-UUID) repository,
  # and that a failed switch leaves the working copy intact.
  sbox.build()
  wc_dir = sbox.wc_dir
  repo_url = sbox.repo_url
  other_repo_url = repo_url
  # Strip trailing slashes and add something bogus to that other URL.
  # (The original code assigned to a misspelled 'other_repos_url' here,
  # so the stripping never took effect — and would loop forever if the
  # URL actually ended with '/'.)
  other_repo_url = other_repo_url.rstrip('/') + "_bogus"
  other_A_url = other_repo_url + "/A"
  A_wc_dir = sbox.ospath('A')
  # Test 1: A switch that changes to a non-existing repo shouldn't work.
  expected_err = ".*Unable to open repository.*|.*Could not open.*|"\
                 ".*Could not find.*|.*No repository found.*"
  svntest.actions.run_and_verify_svn(None,
                                     expected_err,
                                     'switch', '--ignore-ancestry',
                                     other_A_url, A_wc_dir)
  # Test 2: A switch that changes the repo root part of the URL shouldn't work.
  other_repo_dir, other_repo_url = sbox.add_repo_path('other')
  other_A_url = other_repo_url + "/A"
  svntest.main.create_repos(other_repo_dir)
  svntest.actions.run_and_verify_svn(None,
                                     ".*UUID.*",
                                     'switch', '--ignore-ancestry',
                                     other_A_url, A_wc_dir)
  # Make sure we didn't break the WC.
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
def forced_switch(sbox):
  "forced switch tolerates obstructions to adds"
  # Scenario: populate A/B/F with unversioned items that match (or do
  # not match) paths that a switch to A/D would add, then verify that
  # 'svn switch --force' adopts matching obstructions ('E' status),
  # flags the differing file as locally modified, and leaves unrelated
  # unversioned items alone.
  sbox.build(read_only = True)
  # Dir obstruction
  G_path = sbox.ospath('A/B/F/G')
  os.mkdir(G_path)
  # Faux file obstructions
  shutil.copyfile(sbox.ospath('A/D/gamma'),
                  sbox.ospath('A/B/F/gamma'))
  shutil.copyfile(sbox.ospath('A/D/G/tau'),
                  sbox.ospath('A/B/F/G/tau'))
  # Real file obstruction
  pi_path = sbox.ospath('A/B/F/G/pi')
  svntest.main.file_write(pi_path,
                          "This is the OBSTRUCTING file 'pi'.\n")
  # Non-obstructing dir and file
  I_path = sbox.ospath('A/B/F/I')
  os.mkdir(I_path)
  upsilon_path = os.path.join(G_path, 'upsilon')
  svntest.main.file_write(upsilon_path,
                          "This is the unversioned file 'upsilon'.\n")
  # Setup expected results of switch.
  expected_output = svntest.wc.State(sbox.wc_dir, {
    "A/B/F/gamma" : Item(status='E '),
    "A/B/F/G" : Item(status='E '),
    "A/B/F/G/pi" : Item(status='E '),
    "A/B/F/G/rho" : Item(status='A '),
    "A/B/F/G/tau" : Item(status='E '),
    "A/B/F/H" : Item(status='A '),
    "A/B/F/H/chi" : Item(status='A '),
    "A/B/F/H/omega" : Item(status='A '),
    "A/B/F/H/psi" : Item(status='A '),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_disk.add({
    "A/B/F/gamma" : Item("This is the file 'gamma'.\n"),
    "A/B/F/G" : Item(),
    "A/B/F/G/pi" : Item("This is the OBSTRUCTING file 'pi'.\n"),
    "A/B/F/G/rho" : Item("This is the file 'rho'.\n"),
    "A/B/F/G/tau" : Item("This is the file 'tau'.\n"),
    "A/B/F/G/upsilon" : Item("This is the unversioned file 'upsilon'.\n"),
    "A/B/F/H" : Item(),
    "A/B/F/H/chi" : Item("This is the file 'chi'.\n"),
    "A/B/F/H/omega" : Item("This is the file 'omega'.\n"),
    "A/B/F/H/psi" : Item("This is the file 'psi'.\n"),
    "A/B/F/I" : Item(),
    })
  expected_status = svntest.actions.get_virginal_state(sbox.wc_dir, 1)
  expected_status.tweak('A/B/F', switched='S')
  expected_status.add({
    "A/B/F/gamma" : Item(status=' ', wc_rev=1),
    "A/B/F/G" : Item(status=' ', wc_rev=1),
    "A/B/F/G/pi" : Item(status='M ', wc_rev=1),
    "A/B/F/G/rho" : Item(status=' ', wc_rev=1),
    "A/B/F/G/tau" : Item(status=' ', wc_rev=1),
    "A/B/F/H" : Item(status=' ', wc_rev=1),
    "A/B/F/H/chi" : Item(status=' ', wc_rev=1),
    "A/B/F/H/omega" : Item(status=' ', wc_rev=1),
    "A/B/F/H/psi" : Item(status=' ', wc_rev=1),
    })
  # Do the switch and check the results in three ways.
  F_path = sbox.ospath('A/B/F')
  AD_url = sbox.repo_url + '/A/D'
  svntest.actions.run_and_verify_switch(sbox.wc_dir, F_path, AD_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status, [], False,
                                        '--force', '--ignore-ancestry')
#----------------------------------------------------------------------
def forced_switch_failures(sbox):
  "forced switch detects tree conflicts"

  # NOTE: this test body was originally generated by svntest.factory from
  # the shell transcript below; the transcript documents the scenario.
  # svntest.factory.make(sbox,
  #   """
  #   # Add a directory to obstruct a file.
  #   mkdir A/B/F/pi
  #
  #   # Add a file to obstruct a directory.
  #   echo "The file 'H'" > A/C/H
  #
  #   # Test three cases where forced switch should cause a tree conflict
  #
  #   # 1) A forced switch that tries to add a file when an unversioned
  #   #    directory of the same name already exists. (Currently fails)
  #   svn switch --force url/A/D A/C
  #
  #   # 2) A forced switch that tries to add a dir when a file of the same
  #   #    name already exists. (Tree conflict)
  #   svn switch --force url/A/D/G A/B/F
  #   svn info A/B/F/pi
  #
  #   # 3) A forced update that tries to add a directory when a versioned
  #   #    directory of the same name already exists.
  #
  #   # Make dir A/D/H/I in repos.
  #   svn mkdir -m "Log message" url/A/D/H/I
  #
  #   # Make A/D/G/I and co A/D/H/I into it.
  #   mkdir A/D/G/I
  #   svn co url/A/D/H/I A/D/G/I
  #
  #   # Try the forced switch. A/D/G/I obstructs the dir A/D/G/I coming
  #   # from the repos, causing an error.
  #   svn switch --force url/A/D/H A/D/G
  #
  #   # Delete all three obstructions and finish the update.
  #   rm -rf A/D/G/I
  #   rm A/B/F/pi
  #   rm A/C/H
  #
  #   # A/B/F is switched to A/D/G
  #   # A/C is switched to A/D
  #   # A/D/G is switched to A/D/H
  #   svn up
  #   """)
  # exit(0)

  sbox.build()
  wc_dir = sbox.wc_dir
  url = sbox.repo_url

  # Working-copy paths and repository URLs used throughout the scenario.
  A_B_F = sbox.ospath('A/B/F')
  A_B_F_pi = sbox.ospath('A/B/F/pi')
  A_C = sbox.ospath('A/C')
  A_C_H = sbox.ospath('A/C/H')
  A_D_G = sbox.ospath('A/D/G')
  A_D_G_I = sbox.ospath('A/D/G/I')
  url_A_D = url + '/A/D'
  url_A_D_G = url + '/A/D/G'
  url_A_D_H = url + '/A/D/H'
  url_A_D_H_I = url + '/A/D/H/I'

  # Add a directory to obstruct a file.
  # mkdir A/B/F/pi
  os.makedirs(A_B_F_pi)

  # Add a file to obstruct a directory.
  # echo "The file 'H'" > A/C/H
  main.file_write(A_C_H, "The file 'H'\n")

  # Test three cases where forced switch should cause a tree conflict

  # 1) A forced switch that tries to add a directory when an unversioned
  #    file of the same name already exists. (Currently fails)
  # svn switch --force url/A/D A/C
  # The unversioned file A/C/H obstructs the incoming directory A/D/H, so
  # the switch flags a tree conflict on A/C/H instead of replacing it.
  expected_output = svntest.wc.State(wc_dir, {
    'A/C/G'             : Item(status='A '),
    'A/C/G/pi'          : Item(status='A '),
    'A/C/G/rho'         : Item(status='A '),
    'A/C/G/tau'         : Item(status='A '),
    'A/C/gamma'         : Item(status='A '),
    'A/C/H'             : Item(status='  ', treeconflict='C'),
    'A/C/H/psi'         : Item(status='  ', treeconflict='A'),
    'A/C/H/omega'       : Item(status='  ', treeconflict='A'),
    'A/C/H/chi'         : Item(status='  ', treeconflict='A'),
  })

  expected_disk = svntest.main.greek_state.copy()
  expected_disk.add({
    'A/C/gamma'         : Item(contents="This is the file 'gamma'.\n"),
    'A/C/G'             : Item(),
    'A/C/G/pi'          : Item(contents="This is the file 'pi'.\n"),
    'A/C/G/rho'         : Item(contents="This is the file 'rho'.\n"),
    'A/C/G/tau'         : Item(contents="This is the file 'tau'.\n"),
    'A/C/H'             : Item(contents="The file 'H'\n"),
    'A/B/F/pi'          : Item(),
  })

  expected_status = actions.get_virginal_state(wc_dir, 1)
  expected_status.add({
    'A/C/G'             : Item(status='  ', wc_rev='1'),
    'A/C/G/rho'         : Item(status='  ', wc_rev='1'),
    'A/C/G/tau'         : Item(status='  ', wc_rev='1'),
    'A/C/G/pi'          : Item(status='  ', wc_rev='1'),
    'A/C/H'             : Item(status='D ', treeconflict='C', wc_rev='1'),
    'A/C/H/psi'         : Item(status='D ', wc_rev='1'),
    'A/C/H/omega'       : Item(status='D ', wc_rev='1'),
    'A/C/H/chi'         : Item(status='D ', wc_rev='1'),
    'A/C/gamma'         : Item(status='  ', wc_rev='1'),
  })
  expected_status.tweak('A/C', switched='S')

  actions.run_and_verify_switch(wc_dir, A_C, url_A_D, expected_output,
                                expected_disk, expected_status, [], False,
                                '--force',
                                '--ignore-ancestry')

  # 2) A forced switch that tries to add a file when a dir of the same
  #    name already exists. (Tree conflict)
  # svn switch --force url/A/D/G A/B/F
  # Here the unversioned directory A/B/F/pi obstructs the incoming file.
  expected_output = svntest.wc.State(wc_dir, {
    'A/B/F/rho'         : Item(status='A '),
    'A/B/F/pi'          : Item(status='  ', treeconflict='C'),
    'A/B/F/tau'         : Item(status='A '),
  })

  expected_disk.add({
    'A/B/F/rho'         : Item(contents="This is the file 'rho'.\n"),
    'A/B/F/tau'         : Item(contents="This is the file 'tau'.\n"),
  })

  expected_status.add({
    'A/B/F/tau'         : Item(status='  ', wc_rev='1'),
    'A/B/F/pi'          : Item(status='D ', treeconflict='C', wc_rev='1'),
    'A/B/F/rho'         : Item(status='  ', wc_rev='1'),
  })
  expected_status.tweak('A/B/F', switched='S')

  actions.run_and_verify_switch(wc_dir, A_B_F, url_A_D_G, expected_output,
                                expected_disk, expected_status, [], False,
                                '--force',
                                '--ignore-ancestry')

  # svn info A/B/F/pi
  # 'svn info' must describe the conflict on the obstructed path.
  expected_stdout = verify.ExpectedOutput(
    'Tree conflict: local dir unversioned, incoming file add upon switch\n',
    match_all=False)

  actions.run_and_verify_svn2(expected_stdout, [], 0, 'info',
                              A_B_F_pi)

  # 3) A forced update that tries to add a directory when a versioned
  #    directory of the same name already exists.

  # Make dir A/D/H/I in repos.
  # svn mkdir -m "Log message" url/A/D/H/I
  expected_stdout = verify.UnorderedOutput([
    'Committing transaction...\n',
    'Committed revision 2.\n',
  ])

  actions.run_and_verify_svn2(expected_stdout, [], 0, 'mkdir',
                              '-m', 'Log message', url_A_D_H_I)

  # Make A/D/G/I and co A/D/H/I into it.
  # mkdir A/D/G/I
  os.makedirs(A_D_G_I)

  # svn co url/A/D/H/I A/D/G/I
  # A/D/G/I is now a separate, nested working copy inside this one.
  expected_output = svntest.wc.State(wc_dir, {})

  expected_disk.add({
    'A/D/G/I'           : Item(),
  })

  exit_code, so, se = svntest.actions.run_and_verify_svn(
    ['Checked out revision 2.\n'], [],
    "co", url_A_D_H_I, A_D_G_I)

  # Try the forced switch. A/D/G/I obstructs the dir A/D/G/I coming
  # from the repos, causing an error.
  # svn switch --force url/A/D/H A/D/G
  # The nested working copy cannot be merged in, so the path is 'Skipped'.
  expected_output = svntest.wc.State(wc_dir, {
    'A/D/G/chi'         : Item(status='A '),
    'A/D/G/tau'         : Item(status='D '),
    'A/D/G/omega'       : Item(status='A '),
    'A/D/G/psi'         : Item(status='A '),
    'A/D/G/I'           : Item(verb='Skipped'),
    'A/D/G/rho'         : Item(status='D '),
    'A/D/G/pi'          : Item(status='D '),
  })

  actions.run_and_verify_switch(wc_dir, A_D_G, url_A_D_H, expected_output,
                                None, None, [], False,
                                '--force', '--ignore-ancestry')

  # Delete all three obstructions and finish the update.
  # rm -rf A/D/G/I
  main.safe_rmtree(A_D_G_I)

  # rm A/B/F/pi
  main.safe_rmtree(A_B_F_pi)

  # rm A/C/H
  os.remove(A_C_H)

  # Resolve the tree conflict on A_C_H and A_B_F_pi
  svntest.main.run_svn(None, 'resolved', A_C_H)
  svntest.main.run_svn(None, 'revert', A_B_F_pi)

  # A/B/F is switched to A/D/G
  # A/C is switched to A/D
  # A/D/G is switched to A/D/H
  # svn up
  # With the obstructions gone, a plain update pulls in the r2 additions
  # at each of the three (switched) locations.
  expected_output = svntest.wc.State(wc_dir, {
    'A/C/H/I'           : Item(status='A '),
    'A/D/G/I'           : Item(status='A '),
    'A/D/H/I'           : Item(status='A '),
  })

  expected_disk.remove('A/D/G/tau', 'A/D/G/rho', 'A/D/G/pi')
  expected_disk.add({
    'A/D/H/I'           : Item(),
    'A/D/G/omega'       : Item(contents="This is the file 'omega'.\n"),
    'A/D/G/psi'         : Item(contents="This is the file 'psi'.\n"),
    'A/D/G/chi'         : Item(contents="This is the file 'chi'.\n"),
    'A/C/H/I'           : Item(),
    'A/C/H/omega'       : Item(contents="This is the file 'omega'.\n"),
    'A/C/H/psi'         : Item(contents="This is the file 'psi'.\n"),
    'A/C/H/chi'         : Item(contents="This is the file 'chi'.\n"),
  })
  expected_disk.tweak('A/C/H', contents=None)
  expected_disk.tweak('A/B/F/pi', contents="This is the file 'pi'.\n")

  expected_status.remove('A/D/G/tau', 'A/D/G/rho', 'A/D/G/pi')
  expected_status.add({
    'A/D/G/omega'       : Item(status='  ', wc_rev='2'),
    'A/D/G/I'           : Item(status='  ', wc_rev='2'),
    'A/D/G/psi'         : Item(status='  ', wc_rev='2'),
    'A/D/G/chi'         : Item(status='  ', wc_rev='2'),
    'A/D/H/I'           : Item(status='  ', wc_rev='2'),
    'A/C/H/psi'         : Item(status='  ', wc_rev='2'),
    'A/C/H/omega'       : Item(status='  ', wc_rev='2'),
    'A/C/H/chi'         : Item(status='  ', wc_rev='2'),
    'A/C/H/I'           : Item(status='  ', wc_rev='2'),
  })
  expected_status.tweak(wc_rev='2', status='  ')
  expected_status.tweak('A/B/F/pi', 'A/C/H', treeconflict=None)
  expected_status.tweak('A/D/G', switched='S')

  svntest.main.run_svn(None, 'revert', '-R', sbox.ospath('A/C/H'))

  actions.run_and_verify_update(wc_dir, expected_output, expected_disk,
                                expected_status)
def switch_with_obstructing_local_adds(sbox):
  "switch tolerates WC adds"

  sbox.build(read_only = True)

  # Dir obstruction scheduled for addition without history.
  G_path = sbox.ospath('A/B/F/G')
  os.mkdir(G_path)

  # File obstructions scheduled for addition without history.
  # Contents identical to additions from switch.
  gamma_copy_path = sbox.ospath('A/B/F/gamma')
  shutil.copyfile(sbox.ospath('A/D/gamma'),
                  gamma_copy_path)
  shutil.copyfile(sbox.ospath('A/D/G/tau'),
                  sbox.ospath('A/B/F/G/tau'))

  # File obstruction scheduled for addition without history.
  # Contents conflict with addition from switch.
  pi_path = sbox.ospath('A/B/F/G/pi')
  svntest.main.file_write(pi_path,
                          "This is the OBSTRUCTING file 'pi'.\n")

  # Non-obstructing dir and file scheduled for addition without history.
  I_path = sbox.ospath('A/B/F/I')
  os.mkdir(I_path)
  upsilon_path = os.path.join(G_path, 'upsilon')
  svntest.main.file_write(upsilon_path,
                          "This is the unversioned file 'upsilon'.\n")

  # Add the above obstructions.
  # (upsilon is picked up implicitly because G_path is added recursively.)
  svntest.actions.run_and_verify_svn(None, [],
                                     'add', G_path, I_path,
                                     gamma_copy_path)

  # Setup expected results of switch.
  # Locally-added paths that collide with incoming additions are reported
  # as tree conflicts; non-colliding incoming paths (A/B/F/H/*) are added.
  expected_output = svntest.wc.State(sbox.wc_dir, {
    "A/B/F/gamma"   : Item(status='  ', treeconflict='C'),
    "A/B/F/G"       : Item(status='  ', treeconflict='C'),
    'A/B/F/G/tau'   : Item(status='  ', treeconflict='A'),
    'A/B/F/G/rho'   : Item(status='  ', treeconflict='A'),
    'A/B/F/G/pi'    : Item(status='  ', treeconflict='A'),
    "A/B/F/H"       : Item(status='A '),
    "A/B/F/H/chi"   : Item(status='A '),
    "A/B/F/H/omega" : Item(status='A '),
    "A/B/F/H/psi"   : Item(status='A '),
  })
  expected_disk = svntest.main.greek_state.copy()
  expected_disk.add({
    "A/B/F/gamma"     : Item("This is the file 'gamma'.\n"),
    "A/B/F/G"         : Item(),
    "A/B/F/G/pi"      : Item("This is the OBSTRUCTING file 'pi'.\n"),
    "A/B/F/G/tau"     : Item("This is the file 'tau'.\n"),
    "A/B/F/G/upsilon" : Item("This is the unversioned file 'upsilon'.\n"),
    "A/B/F/H"         : Item(),
    "A/B/F/H/chi"     : Item("This is the file 'chi'.\n"),
    "A/B/F/H/omega"   : Item("This is the file 'omega'.\n"),
    "A/B/F/H/psi"     : Item("This is the file 'psi'.\n"),
    "A/B/F/I"         : Item(),
  })
  expected_status = svntest.actions.get_virginal_state(sbox.wc_dir, 1)
  expected_status.tweak('A/B/F', switched='S')
  expected_status.add({
    'A/B/F/gamma'     : Item(status='R ', treeconflict='C', wc_rev='1'),
    'A/B/F/G'         : Item(status='R ', treeconflict='C', wc_rev='1'),
    'A/B/F/G/pi'      : Item(status='A ', wc_rev='-', entry_status='R ', entry_rev='1'),
    'A/B/F/G/tau'     : Item(status='A ', wc_rev='-', entry_status='R ', entry_rev='1'),
    'A/B/F/G/upsilon' : Item(status='A ', wc_rev='-', entry_rev='0'),
    'A/B/F/G/rho'     : Item(status='D ', wc_rev='1'),
    'A/B/F/H'         : Item(status='  ', wc_rev='1'),
    'A/B/F/H/chi'     : Item(status='  ', wc_rev='1'),
    'A/B/F/H/omega'   : Item(status='  ', wc_rev='1'),
    'A/B/F/H/psi'     : Item(status='  ', wc_rev='1'),
    'A/B/F/I'         : Item(status='A ', wc_rev='-', entry_rev='0'),
  })

  # Do the switch and check the results in three ways.
  F_path = sbox.ospath('A/B/F')
  D_url = sbox.repo_url + '/A/D'

  svntest.actions.run_and_verify_switch(sbox.wc_dir, F_path, D_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], False,
                                        '--ignore-ancestry')
#----------------------------------------------------------------------
def switch_scheduled_add(sbox):
  "switch a scheduled-add file"

  # Switching a file that is only scheduled for addition (never committed)
  # must fail with E200007, and switching a path that does not exist in the
  # working copy at all must fail with E155010.
  sbox.build(read_only = True)
  wc_dir = sbox.wc_dir

  switch_url = sbox.repo_url + '/iota'
  stub_path = sbox.ospath('stub_file')
  missing_path = sbox.ospath('nodo')

  # Create an empty file and schedule it for addition.
  svntest.main.file_append(stub_path, "")
  svntest.actions.run_and_verify_svn(None, [],
                                     'add', stub_path)

  # A scheduled-add file has no repository counterpart yet.
  svntest.actions.run_and_verify_svn(None,
                                    "svn: E200007: Cannot switch '.*file' " +
                                    "because it is not in the repository yet",
                                    'switch', '--ignore-ancestry',
                                    switch_url, stub_path)

  # An entirely unversioned, nonexistent target cannot be switched either.
  svntest.actions.run_and_verify_svn(None,
                                    "svn: E155010: The node '.*nodo' was not",
                                    'switch', '--ignore-ancestry',
                                    switch_url, missing_path)
#----------------------------------------------------------------------
@SkipUnless(server_has_mergeinfo)
def mergeinfo_switch_elision(sbox):
  "mergeinfo does not elide post switch"

  # When a switch adds mergeinfo on a path which is identical to
  # the mergeinfo on one of the path's subtrees, the subtree's mergeinfo
  # should *not* elide!  If it did this could result in the switch of a
  # pristine tree producing local mods.

  sbox.build()
  wc_dir = sbox.wc_dir

  # Some paths we'll care about
  lambda_path   = sbox.ospath('A/B_COPY_1/lambda')
  B_COPY_1_path = sbox.ospath('A/B_COPY_1')
  B_COPY_2_path = sbox.ospath('A/B_COPY_2')
  E_COPY_2_path = sbox.ospath('A/B_COPY_2/E')
  alpha_path    = sbox.ospath('A/B/E/alpha')
  beta_path     = sbox.ospath('A/B/E/beta')

  # Make branches A/B_COPY_1 and A/B_COPY_2
  expected_stdout = verify.UnorderedOutput([
    "A    " + B_COPY_1_path + "\n",
    "A    " + sbox.ospath('A/B_COPY_1/lambda') + "\n",
    "A    " + sbox.ospath('A/B_COPY_1/E') + "\n",
    "A    " + sbox.ospath('A/B_COPY_1/E/alpha') + "\n",
    "A    " + sbox.ospath('A/B_COPY_1/E/beta') + "\n",
    "A    " + sbox.ospath('A/B_COPY_1/F') + "\n",
  ])

  svntest.actions.run_and_verify_svn(expected_stdout, [], 'copy',
                                     sbox.repo_url + "/A/B", B_COPY_1_path)

  expected_stdout = verify.UnorderedOutput([
    "A    " + B_COPY_2_path + "\n",
    "A    " + sbox.ospath('A/B_COPY_2/lambda') + "\n",
    "A    " + sbox.ospath('A/B_COPY_2/E') + "\n",
    "A    " + sbox.ospath('A/B_COPY_2/E/alpha') + "\n",
    "A    " + sbox.ospath('A/B_COPY_2/E/beta') + "\n",
    "A    " + sbox.ospath('A/B_COPY_2/F') + "\n",
  ])

  svntest.actions.run_and_verify_svn(expected_stdout, [], 'copy',
                                     sbox.repo_url + "/A/B", B_COPY_2_path)

  # r2: commit both new branches at once.
  expected_output = svntest.wc.State(wc_dir, {
    'A/B_COPY_1' : Item(verb='Adding'),
    'A/B_COPY_2' : Item(verb='Adding')
    })

  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.add({
    "A/B_COPY_1"         : Item(status='  ', wc_rev=2),
    "A/B_COPY_1/lambda"  : Item(status='  ', wc_rev=2),
    "A/B_COPY_1/E"       : Item(status='  ', wc_rev=2),
    "A/B_COPY_1/E/alpha" : Item(status='  ', wc_rev=2),
    "A/B_COPY_1/E/beta"  : Item(status='  ', wc_rev=2),
    "A/B_COPY_1/F"       : Item(status='  ', wc_rev=2),
    "A/B_COPY_2"         : Item(status='  ', wc_rev=2),
    "A/B_COPY_2/lambda"  : Item(status='  ', wc_rev=2),
    "A/B_COPY_2/E"       : Item(status='  ', wc_rev=2),
    "A/B_COPY_2/E/alpha" : Item(status='  ', wc_rev=2),
    "A/B_COPY_2/E/beta"  : Item(status='  ', wc_rev=2),
    "A/B_COPY_2/F"       : Item(status='  ', wc_rev=2),
  })

  svntest.actions.run_and_verify_commit(wc_dir,
                                        expected_output,
                                        expected_status)

  # Make some changes under A/B

  # r3 - modify and commit A/B/E/beta
  svntest.main.file_write(beta_path, "New content")

  expected_output = svntest.wc.State(wc_dir,
                                     {'A/B/E/beta' : Item(verb='Sending')})

  expected_status.tweak('A/B/E/beta', wc_rev=3)

  svntest.actions.run_and_verify_commit(wc_dir, expected_output,
                                        expected_status)

  # r4 - modify and commit A/B/E/alpha
  svntest.main.file_write(alpha_path, "New content")

  expected_output = svntest.wc.State(wc_dir,
                                     {'A/B/E/alpha' : Item(verb='Sending')})

  expected_status.tweak('A/B/E/alpha', wc_rev=4)

  svntest.actions.run_and_verify_commit(wc_dir, expected_output,
                                        expected_status)

  # Merge r2:4 into A/B_COPY_1
  # This records '/A/B:3-4' on the branch root A/B_COPY_1.
  expected_output = svntest.wc.State(B_COPY_1_path, {
    'E/alpha' : Item(status='U '),
    'E/beta'  : Item(status='U '),
    })

  expected_mergeinfo_output = svntest.wc.State(B_COPY_1_path, {
    '' : Item(status=' U'),
    })

  expected_elision_output = svntest.wc.State(B_COPY_1_path, {
    })

  expected_merge_status = svntest.wc.State(B_COPY_1_path, {
    ''        : Item(status=' M', wc_rev=2),
    'lambda'  : Item(status='  ', wc_rev=2),
    'E'       : Item(status='  ', wc_rev=2),
    'E/alpha' : Item(status='M ', wc_rev=2),
    'E/beta'  : Item(status='M ', wc_rev=2),
    'F'       : Item(status='  ', wc_rev=2),
    })

  expected_merge_disk = svntest.wc.State('', {
    ''        : Item(props={SVN_PROP_MERGEINFO : '/A/B:3-4'}),
    'lambda'  : Item("This is the file 'lambda'.\n"),
    'E'       : Item(),
    'E/alpha' : Item("New content"),
    'E/beta'  : Item("New content"),
    'F'       : Item(),
    })

  expected_skip = svntest.wc.State(B_COPY_1_path, { })

  svntest.actions.run_and_verify_merge(B_COPY_1_path, '2', '4',
                                       sbox.repo_url + '/A/B', None,
                                       expected_output,
                                       expected_mergeinfo_output,
                                       expected_elision_output,
                                       expected_merge_disk,
                                       expected_merge_status,
                                       expected_skip,
                                       check_props=True)

  # r5 - Commit the merge into A/B_COPY_1/E
  expected_output = svntest.wc.State(
    wc_dir,
    {'A/B_COPY_1'         : Item(verb='Sending'),
     'A/B_COPY_1/E/alpha' : Item(verb='Sending'),
     'A/B_COPY_1/E/beta'  : Item(verb='Sending'),
     })

  expected_status.tweak('A/B_COPY_1',         wc_rev=5)
  expected_status.tweak('A/B_COPY_1/E/alpha', wc_rev=5)
  expected_status.tweak('A/B_COPY_1/E/beta',  wc_rev=5)
  expected_status.tweak('A/B_COPY_1/lambda',  wc_rev=2)

  svntest.actions.run_and_verify_commit(wc_dir, expected_output,
                                        expected_status)

  # Merge r2:4 into A/B_COPY_2/E
  # This records '/A/B/E:3-4' on the subtree A/B_COPY_2/E only; it is
  # left uncommitted as a local property mod.
  expected_output = svntest.wc.State(E_COPY_2_path, {
    'alpha' : Item(status='U '),
    'beta'  : Item(status='U '),
    })

  expected_mergeinfo_output = svntest.wc.State(E_COPY_2_path, {
    '' : Item(status=' U'),
    })

  expected_elision_output = svntest.wc.State(E_COPY_2_path, {
    })

  expected_merge_status = svntest.wc.State(E_COPY_2_path, {
    ''      : Item(status=' M', wc_rev=2),
    'alpha' : Item(status='M ', wc_rev=2),
    'beta'  : Item(status='M ', wc_rev=2),
    })

  expected_merge_disk = svntest.wc.State('', {
    ''      : Item(props={SVN_PROP_MERGEINFO : '/A/B/E:3-4'}),
    'alpha' : Item("New content"),
    'beta'  : Item("New content"),
    })

  expected_skip = svntest.wc.State(E_COPY_2_path, { })

  svntest.actions.run_and_verify_merge(E_COPY_2_path, '2', '4',
                                       sbox.repo_url + '/A/B/E', None,
                                       expected_output,
                                       expected_mergeinfo_output,
                                       expected_elision_output,
                                       expected_merge_disk,
                                       expected_merge_status,
                                       expected_skip,
                                       check_props=True)

  # Switch A/B_COPY_2 to URL of A/B_COPY_1.  The local mergeinfo for r1,3-4
  # on A/B_COPY_2/E is identical to the mergeinfo added to A/B_COPY_2 as a
  # result of the switch, but we leave the former in place.

  # Setup expected results of switch.
  expected_output = svntest.wc.State(sbox.wc_dir, {
    "A/B_COPY_2"         : Item(status=' U'),
    "A/B_COPY_2/E/alpha" : Item(status='G '),
    "A/B_COPY_2/E/beta"  : Item(status='G '),
    })

  expected_disk = svntest.main.greek_state.copy()
  expected_disk.tweak("A/B/E/alpha", contents="New content")
  expected_disk.tweak("A/B/E/beta", contents="New content")
  expected_disk.add({
    "A/B_COPY_1"         : Item(props={SVN_PROP_MERGEINFO : '/A/B:3-4'}),
    "A/B_COPY_1/E"       : Item(),
    "A/B_COPY_1/F"       : Item(),
    "A/B_COPY_1/lambda"  : Item("This is the file 'lambda'.\n"),
    "A/B_COPY_1/E/alpha" : Item("New content"),
    "A/B_COPY_1/E/beta"  : Item("New content"),
    "A/B_COPY_2"         : Item(props={SVN_PROP_MERGEINFO : '/A/B:3-4'}),
    "A/B_COPY_2/E"       : Item(props={SVN_PROP_MERGEINFO : '/A/B/E:3-4'}),
    "A/B_COPY_2/F"       : Item(),
    "A/B_COPY_2/lambda"  : Item("This is the file 'lambda'.\n"),
    "A/B_COPY_2/E/alpha" : Item("New content"),
    "A/B_COPY_2/E/beta"  : Item("New content"),
  })
  expected_status = svntest.actions.get_virginal_state(sbox.wc_dir, 1)
  expected_status.tweak("A/B/E/beta", wc_rev=3)
  expected_status.tweak("A/B/E/alpha", wc_rev=4)
  expected_status.add({
    "A/B_COPY_1"         : Item(status='  ', wc_rev=5),
    "A/B_COPY_1/E"       : Item(status='  ', wc_rev=2),
    "A/B_COPY_1/F"       : Item(status='  ', wc_rev=2),
    "A/B_COPY_1/lambda"  : Item(status='  ', wc_rev=2),
    "A/B_COPY_1/E/alpha" : Item(status='  ', wc_rev=5),
    "A/B_COPY_1/E/beta"  : Item(status='  ', wc_rev=5),
    "A/B_COPY_2"         : Item(status='  ', wc_rev=5, switched='S'),
    "A/B_COPY_2/E"       : Item(status=' M', wc_rev=5),
    "A/B_COPY_2/F"       : Item(status='  ', wc_rev=5),
    "A/B_COPY_2/lambda"  : Item(status='  ', wc_rev=5),
    "A/B_COPY_2/E/alpha" : Item(status='  ', wc_rev=5),
    "A/B_COPY_2/E/beta"  : Item(status='  ', wc_rev=5),
  })

  svntest.actions.run_and_verify_switch(sbox.wc_dir,
                                        B_COPY_2_path,
                                        sbox.repo_url + "/A/B_COPY_1",
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], True,
                                        '--ignore-ancestry')

  # Now check a switch which reverses and earlier switch and leaves
  # a path in an unswitched state.
  #
  # Switch A/B_COPY_1/lambda to iota.  Use propset to give A/B_COPY/lambda
  # the mergeinfo '/A/B/lambda:1,3-4'.  Then switch A/B_COPY_1/lambda back
  # to A/B_COPY_1/lambda.  The local mergeinfo for r1,3-4 should remain on
  # A/B_COPY_1/lambda.
  expected_output = svntest.wc.State(sbox.wc_dir, {
    "A/B_COPY_1/lambda" : Item(status='U '),
    })

  expected_disk.tweak("A/B_COPY_1/lambda",
                      contents="This is the file 'iota'.\n")

  expected_status.tweak("A/B_COPY_1/lambda", wc_rev=5, switched='S')

  svntest.actions.run_and_verify_switch(sbox.wc_dir,
                                        lambda_path,
                                        sbox.repo_url + "/iota",
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], True,
                                        '--ignore-ancestry')

  svntest.actions.run_and_verify_svn(["property '" + SVN_PROP_MERGEINFO +
                                      "' set on '" + lambda_path + "'" +
                                      "\n"], [], 'ps', SVN_PROP_MERGEINFO,
                                     '/A/B/lambda:3-4', lambda_path)

  # Switching the file back: the locally-set mergeinfo must survive as a
  # property modification rather than eliding.
  expected_output = svntest.wc.State(sbox.wc_dir, {
    "A/B_COPY_1/lambda" : Item(status='U '),
    })

  expected_disk.tweak("A/B_COPY_1/lambda",
                      contents="This is the file 'lambda'.\n",
                      props={SVN_PROP_MERGEINFO : '/A/B/lambda:3-4'})

  expected_status.tweak("A/B_COPY_1/lambda", switched=None, status=' M')

  svntest.actions.run_and_verify_switch(sbox.wc_dir,
                                        lambda_path,
                                        sbox.repo_url + "/A/B_COPY_1/lambda",
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], True,
                                        '--ignore-ancestry')
#----------------------------------------------------------------------
def switch_with_depth(sbox):
  "basic tests to verify switch along with depth"

  sbox.build(read_only = True)

  # Form some paths and URLs required
  wc_dir = sbox.wc_dir
  repo_url = sbox.repo_url
  AD_url = repo_url + '/A/D'
  AB_url = repo_url + '/A/B'
  AB_path = sbox.ospath('A/B')

  # Set up expected results of 'switch --depth=empty'
  # With depth=empty only A/B itself is switched; its children stay at
  # their old URLs and are therefore reported as (implicitly) switched.
  expected_output = svntest.wc.State(wc_dir, {})
  expected_disk = svntest.main.greek_state.copy()
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.tweak('A/B', switched='S')
  expected_status.tweak('A/B/lambda', switched='S')
  expected_status.tweak('A/B/E', switched='S')
  expected_status.tweak('A/B/F', switched='S')

  # Do 'switch --depth=empty' and check the results in three ways.
  svntest.actions.run_and_verify_switch(wc_dir, AB_path, AD_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], False,
                                        '--depth', 'empty', '--ignore-ancestry')

  # Set up expected results for reverting 'switch --depth=empty'
  expected_output = svntest.wc.State(wc_dir, {})
  expected_disk = svntest.main.greek_state.copy()
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)

  svntest.actions.run_and_verify_switch(wc_dir, AB_path, AB_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], False,
                                        '--depth', 'empty', '--ignore-ancestry')

  # Set up expected results of 'switch --depth=files'
  # depth=files switches A/B and its immediate file children (lambda ->
  # gamma), but leaves the subdirectories E and F at the old URL.
  expected_output = svntest.wc.State(wc_dir, {
    'A/B/lambda'   : Item(status='D '),
    'A/B/gamma'    : Item(status='A '),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_disk.remove('A/B/lambda')
  expected_disk.add({
    'A/B/gamma'    : Item("This is the file 'gamma'.\n")
    })
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.remove('A/B/lambda')
  expected_status.add({
    'A/B/gamma'    : Item(status='  ', wc_rev=1)
    })
  expected_status.tweak('A/B', switched='S')
  expected_status.tweak('A/B/E', switched='S')
  expected_status.tweak('A/B/F', switched='S')

  # Do 'switch --depth=files' and check the results in three ways.
  svntest.actions.run_and_verify_switch(wc_dir, AB_path, AD_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], False,
                                        '--depth', 'files', '--ignore-ancestry')

  # Set up expected results for reverting 'switch --depth=files'
  expected_output = svntest.wc.State(wc_dir, {
    'A/B/gamma'    : Item(status='D '),
    'A/B/lambda'   : Item(status='A '),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)

  svntest.actions.run_and_verify_switch(wc_dir, AB_path, AB_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], False,
                                        '--depth', 'files', '--ignore-ancestry')

  # Putting the depth=immediates stuff in a subroutine, because we're
  # going to run it at least twice.
  # (Closure over wc_dir/AB_path/AD_url defined above.)
  def sw_depth_imm():
    # Set up expected results of 'switch --depth=immediates'
    # depth=immediates also replaces the child directories themselves
    # (E, F -> G, H) but not their contents.
    expected_output = svntest.wc.State(wc_dir, {
      'A/B/lambda'   : Item(status='D '),
      'A/B/E'        : Item(status='D '),
      'A/B/F'        : Item(status='D '),
      'A/B/gamma'    : Item(status='A '),
      'A/B/G'        : Item(status='A '),
      'A/B/H'        : Item(status='A '),
      })
    expected_disk = svntest.main.greek_state.copy()
    expected_disk.remove('A/B/lambda', 'A/B/E/beta', 'A/B/E/alpha',
                         'A/B/E', 'A/B/F')
    expected_disk.add({
      'A/B/gamma'    : Item("This is the file 'gamma'.\n"),
      'A/B/G'        : Item(),
      'A/B/H'        : Item(),
      })
    expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
    expected_status.remove('A/B/lambda', 'A/B/E/beta', 'A/B/E/alpha',
                           'A/B/E', 'A/B/F')
    expected_status.add({
      'A/B/gamma'    : Item(status='  ', wc_rev=1),
      'A/B/G'        : Item(status='  ', wc_rev=1),
      'A/B/H'        : Item(status='  ', wc_rev=1)
      })
    expected_status.tweak('A/B', switched='S')

    # Do 'switch --depth=immediates' and check the results in three ways.
    svntest.actions.run_and_verify_switch(wc_dir, AB_path, AD_url,
                                          expected_output,
                                          expected_disk,
                                          expected_status,
                                          [], False,
                                          '--depth', 'immediates',
                                          '--ignore-ancestry')

  sw_depth_imm()

  # Set up expected results for reverting 'switch --depth=immediates'.
  # (Reverting with default [infinite] depth, so that the result is a
  # standard Greek Tree working copy again.)
  expected_output = svntest.wc.State(wc_dir, {
    'A/B/gamma'    : Item(status='D '),
    'A/B/G'        : Item(status='D '),
    'A/B/H'        : Item(status='D '),
    'A/B/lambda'   : Item(status='A '),
    'A/B/E'        : Item(status='A '),
    'A/B/E/alpha'  : Item(status='A '),
    'A/B/E/beta'   : Item(status='A '),
    'A/B/F'        : Item(status='A '),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)

  svntest.actions.run_and_verify_switch(wc_dir, AB_path, AB_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], False,
                                        '--ignore-ancestry')

  # Okay, repeat 'switch --depth=immediates'.  (Afterwards we'll
  # 'switch --depth=infinity', to test going all the way.)
  sw_depth_imm()

  # Set up expected results of 'switch --depth=infinity'
  expected_output = svntest.wc.State(wc_dir, {
    'A/B/gamma'    : Item(status='D '),
    'A/B/G'        : Item(status='D '),
    'A/B/H'        : Item(status='D '),
    'A/B/lambda'   : Item(status='A '),
    'A/B/E'        : Item(status='A '),
    'A/B/E/alpha'  : Item(status='A '),
    'A/B/E/beta'   : Item(status='A '),
    'A/B/F'        : Item(status='A '),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)

  # Do the 'switch --depth=infinity' and check the results in three ways.
  svntest.actions.run_and_verify_switch(wc_dir, AB_path, AB_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], False,
                                        '--depth', 'infinity',
                                        '--ignore-ancestry')
#----------------------------------------------------------------------
def switch_to_dir_with_peg_rev(sbox):
  "switch to dir@peg where dir doesn't exist in HEAD"

  sbox.build()
  wc_dir = sbox.wc_dir
  repo_url = sbox.repo_url

  # prepare two dirs X and Y in rev. 2
  X_path = sbox.ospath('X')
  Y_path = sbox.ospath('Y')
  svntest.main.run_svn(None, 'mkdir', X_path, Y_path)
  sbox.simple_commit(message='log message')

  # change tau in rev. 3
  ADG_path = sbox.ospath('A/D/G')
  tau_path = os.path.join(ADG_path, 'tau')
  svntest.main.file_append(tau_path, "new line\n")
  sbox.simple_commit(message='log message')

  # delete A/D/G in rev. 4
  svntest.main.run_svn(None, 'up', wc_dir)
  svntest.main.run_svn(None, 'rm', ADG_path)
  sbox.simple_commit(message='log message')

  # Test 1: switch X to A/D/G@2
  # A/D/G no longer exists in HEAD (r4), so the peg revision '@3' is what
  # makes the target resolvable; '-r 2' then selects the pristine contents
  # (without the r3 change to tau).
  ADG_url = repo_url + '/A/D/G'
  expected_output = svntest.wc.State(wc_dir, {
    'X/pi'   : Item(status='A '),
    'X/rho'  : Item(status='A '),
    'X/tau'  : Item(status='A '),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_disk.add({
    'X'      : Item(),
    'X/pi'   : Item("This is the file 'pi'.\n"),
    'X/rho'  : Item("This is the file 'rho'.\n"),
    'X/tau'  : Item("This is the file 'tau'.\n"),
    'Y'      : Item(),
    })
  expected_disk.remove('A/D/G', 'A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau')
  expected_status = svntest.actions.get_virginal_state(wc_dir, 3)
  expected_status.remove('A/D/G', 'A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau')
  expected_status.add({
    'X'      : Item(status='  ', wc_rev=2, switched='S'),
    'X/pi'   : Item(status='  ', wc_rev=2),
    'X/rho'  : Item(status='  ', wc_rev=2),
    'X/tau'  : Item(status='  ', wc_rev=2),
    'Y'      : Item(status='  ', wc_rev=3)
    })

  # Do the switch to rev. 2 of /A/D/G@3.
  svntest.actions.run_and_verify_switch(wc_dir, X_path, ADG_url + '@3',
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], False,
                                        '-r', '2', '--ignore-ancestry')
def switch_urls_with_spaces(sbox):
  "switch file and dir to url containing spaces"

  sbox.build()
  wc_dir = sbox.wc_dir
  repo_url = sbox.repo_url

  # add file and directory with spaces in their names.
  XYZ_path = sbox.ospath('X Y Z')
  ABC_path = sbox.ospath('A B C')
  svntest.main.run_svn(None, 'mkdir', XYZ_path, ABC_path)

  tpm_path = sbox.ospath('tau pau mau')
  bbb_path = sbox.ospath('bar baz bal')
  svntest.main.file_write(tpm_path, "This is the file 'tau pau mau'.\n")
  svntest.main.file_write(bbb_path, "This is the file 'bar baz bal'.\n")
  svntest.main.run_svn(None, 'add', tpm_path, bbb_path)

  sbox.simple_commit(message='log message')

  # Test 1: switch directory 'A B C' to url 'X Y Z'
  # (Both directories are empty, so no item-level output is expected.)
  XYZ_url = repo_url + '/X Y Z'
  expected_output = svntest.wc.State(wc_dir, {
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_disk.add({
    'X Y Z'         : Item(),
    'A B C'         : Item(),
    'tau pau mau'   : Item("This is the file 'tau pau mau'.\n"),
    'bar baz bal'   : Item("This is the file 'bar baz bal'.\n"),
    })
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.add({
    'X Y Z'         : Item(status='  ', wc_rev=2),
    'A B C'         : Item(status='  ', wc_rev=2, switched='S'),
    'tau pau mau'   : Item(status='  ', wc_rev=2),
    'bar baz bal'   : Item(status='  ', wc_rev=2),
    })

  svntest.actions.run_and_verify_switch(wc_dir, ABC_path, XYZ_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [],
                                        False, '--ignore-ancestry')

  # Test 2: switch file 'bar baz bal' to 'tau pau mau'
  # The file's contents change to those of 'tau pau mau'.
  tpm_url = repo_url + '/tau pau mau'
  expected_output = svntest.wc.State(wc_dir, {
    'bar baz bal'    : Item(status='U '),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_disk.add({
    'X Y Z'          : Item(),
    'A B C'          : Item(),
    'tau pau mau'    : Item("This is the file 'tau pau mau'.\n"),
    'bar baz bal'    : Item("This is the file 'tau pau mau'.\n"),
    })
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.add({
    'X Y Z'          : Item(status='  ', wc_rev=2),
    'A B C'          : Item(status='  ', wc_rev=2, switched='S'),
    'tau pau mau'    : Item(status='  ', wc_rev=2),
    'bar baz bal'    : Item(status='  ', wc_rev=2, switched='S'),
    })

  svntest.actions.run_and_verify_switch(wc_dir, bbb_path, tpm_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [],
                                        False, '--ignore-ancestry')
def switch_to_dir_with_peg_rev2(sbox):
  "switch to old rev of now renamed branch"

  sbox.build()
  wc_dir = sbox.wc_dir
  repo_url = sbox.repo_url

  # prepare dir X in rev. 2
  X_path = sbox.ospath('X')
  svntest.main.run_svn(None, 'mkdir', X_path)
  sbox.simple_commit(message='log message')

  # make a change in ADG in rev. 3
  tau_path = sbox.ospath('A/D/G/tau')
  svntest.main.file_append(tau_path, "extra line\n")
  sbox.simple_commit(message='log message')

  # Rename ADG to ADY in rev 4
  svntest.main.run_svn(None, 'up', wc_dir)
  ADG_path = sbox.ospath('A/D/G')
  ADY_path = sbox.ospath('A/D/Y')
  svntest.main.run_svn(None, 'mv', ADG_path, ADY_path)
  sbox.simple_commit(message='log message')

  # Test switch X to rev 2 of A/D/Y@HEAD
  # A/D/Y@HEAD traces back through the rename to A/D/G; '-r 2' then picks
  # the pre-change contents (tau without the r3 "extra line").
  ADY_url = sbox.repo_url + '/A/D/Y'
  expected_output = svntest.wc.State(wc_dir, {
    'X/pi'   : Item(status='A '),
    'X/rho'  : Item(status='A '),
    'X/tau'  : Item(status='A '),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_disk.add({
    'X'         : Item(),
    'X/pi'      : Item("This is the file 'pi'.\n"),
    'X/rho'     : Item("This is the file 'rho'.\n"),
    'X/tau'     : Item("This is the file 'tau'.\n"),
    'A/D/Y'     : Item(),
    'A/D/Y/pi'  : Item("This is the file 'pi'.\n"),
    'A/D/Y/rho' : Item("This is the file 'rho'.\n"),
    'A/D/Y/tau' : Item("This is the file 'tau'.\nextra line\n"),
    })
  expected_disk.remove('A/D/G', 'A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau')
  expected_status = svntest.actions.get_virginal_state(wc_dir, 3)
  expected_status.remove('A/D/G', 'A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau')
  expected_status.add({
    'X'           : Item(status='  ', wc_rev=2, switched='S'),
    'X/pi'        : Item(status='  ', wc_rev=2),
    'X/rho'       : Item(status='  ', wc_rev=2),
    'X/tau'       : Item(status='  ', wc_rev=2),
    'A/D/Y'       : Item(status='  ', wc_rev=4),
    'A/D/Y/pi'    : Item(status='  ', wc_rev=4),
    'A/D/Y/rho'   : Item(status='  ', wc_rev=4),
    'A/D/Y/tau'   : Item(status='  ', wc_rev=4),
    })

  svntest.actions.run_and_verify_switch(wc_dir, X_path, ADY_url + '@HEAD',
                                        expected_output,
                                        expected_disk,
                                        expected_status, [], False,
                                        '-r', '2', '--ignore-ancestry')
def switch_to_root(sbox):
  "switch a folder to the root of its repository"

  # Fix: dropped the unused locals 'repo_url' and 'AD_url'.  'AD_url'
  # ('/A/D') was never passed anywhere and was misleading: the switch
  # target below is the repository *root*, not /A/D.
  sbox.build(read_only = True)
  wc_dir = sbox.wc_dir

  ADG_path = sbox.ospath('A/D/G')

  # Test switch A/D/G to the repository root: G's own children disappear
  # and a full copy of the greek tree is checked out underneath A/D/G.
  expected_output = svntest.wc.State(wc_dir, {
    'A/D/G/pi'          : Item(status='D '),
    'A/D/G/rho'         : Item(status='D '),
    'A/D/G/tau'         : Item(status='D '),
    'A/D/G/A'           : Item(status='A '),
    'A/D/G/A/B'         : Item(status='A '),
    'A/D/G/A/B/lambda'  : Item(status='A '),
    'A/D/G/A/B/E'       : Item(status='A '),
    'A/D/G/A/B/E/alpha' : Item(status='A '),
    'A/D/G/A/B/E/beta'  : Item(status='A '),
    'A/D/G/A/B/F'       : Item(status='A '),
    'A/D/G/A/mu'        : Item(status='A '),
    'A/D/G/A/C'         : Item(status='A '),
    'A/D/G/A/D'         : Item(status='A '),
    'A/D/G/A/D/gamma'   : Item(status='A '),
    'A/D/G/A/D/G'       : Item(status='A '),
    'A/D/G/A/D/G/pi'    : Item(status='A '),
    'A/D/G/A/D/G/rho'   : Item(status='A '),
    'A/D/G/A/D/G/tau'   : Item(status='A '),
    'A/D/G/A/D/H'       : Item(status='A '),
    'A/D/G/A/D/H/chi'   : Item(status='A '),
    'A/D/G/A/D/H/omega' : Item(status='A '),
    'A/D/G/A/D/H/psi'   : Item(status='A '),
    'A/D/G/iota'        : Item(status='A '),
    })

  expected_disk = svntest.main.greek_state.copy()
  expected_disk.remove('A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau')
  expected_disk.add_state('A/D/G', svntest.main.greek_state.copy())

  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.remove('A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau')
  expected_status.add_state('A/D/G',
                            svntest.actions.get_virginal_state(wc_dir + '/A/D/G', 1))
  expected_status.tweak('A/D/G', switched = 'S')

  svntest.actions.run_and_verify_switch(wc_dir, ADG_path, sbox.repo_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [],
                                        False, '--ignore-ancestry')
#----------------------------------------------------------------------
# Make sure that switch continues after deleting locally modified
# directories, as update and merge do.
@Issue(2505)
def tolerate_local_mods(sbox):
  "tolerate deletion of a directory with local mods"

  sbox.build()
  wc_dir = sbox.wc_dir

  A_path = sbox.ospath('A')
  L_path = os.path.join(A_path, 'L')
  LM_path = os.path.join(L_path, 'local_mod')
  A_url = sbox.repo_url + '/A'
  A2_url = sbox.repo_url + '/A2'

  # Create the branch A2 (r2) that A will later be switched to.
  svntest.actions.run_and_verify_svn(['Committing transaction...\n',
                                      'Committed revision 2.\n'], [],
                                     'cp', '-m', 'make copy', A_url, A2_url)

  # Add and commit the directory A/L (r3), which only exists on the A
  # side -- switching A to A2 will try to delete it.
  os.mkdir(L_path)
  svntest.main.run_svn(None, 'add', L_path)
  sbox.simple_commit(message='Commit added folder')

  # locally modified versioned file
  svntest.main.file_write(LM_path, 'Locally modified file.\n', 'w+')
  sbox.simple_add('A/L/local_mod')

  # The incoming delete of A/L collides with the local add below it, so
  # we expect a tree conflict on A/L rather than a hard failure.
  expected_output = svntest.wc.State(wc_dir, {
    'A/L' : Item(status='  ', treeconflict='C'),
    })

  expected_disk = svntest.main.greek_state.copy()
  expected_disk.add({
    'A/L'           : Item(),
    'A/L/local_mod' : Item(contents='Locally modified file.\n'),
    })

  expected_status = svntest.actions.get_virginal_state(wc_dir, 3)
  expected_status.tweak('', 'iota', wc_rev=1)
  expected_status.tweak('A', switched='S')
  expected_status.add({
    'A/L'           : Item(status='A ', copied='+', treeconflict='C', wc_rev='-'),
    'A/L/local_mod' : Item(status='A ', wc_rev='-'),
    })

  # Used to fail with locally modified or unversioned files
  svntest.actions.run_and_verify_switch(wc_dir, A_path, A2_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [],
                                        False, '--ignore-ancestry')
#----------------------------------------------------------------------
# Detect tree conflicts among files and directories,
# edited or deleted in a deep directory structure.
#
# See use cases 1-3 in notes/tree-conflicts/use-cases.txt for background.
# Note that we do not try to track renames. The only difference from
# the behavior of Subversion 1.4 and 1.5 is the conflicted status of the
# parent directory.
# convenience definitions
# Short aliases for the svntest.deeptrees helpers used by the
# tree_conflicts_on_switch_* tests below.
leaf_edit = svntest.deeptrees.deep_trees_leaf_edit
tree_del = svntest.deeptrees.deep_trees_tree_del
leaf_del = svntest.deeptrees.deep_trees_leaf_del

# Expected on-disk states after each kind of local modification.
disk_after_leaf_edit = svntest.deeptrees.deep_trees_after_leaf_edit
disk_after_leaf_del = svntest.deeptrees.deep_trees_after_leaf_del
disk_after_tree_del = svntest.deeptrees.deep_trees_after_tree_del

# Expected output/status templates for tree-conflicting operations.
deep_trees_conflict_output = svntest.deeptrees.deep_trees_conflict_output
deep_trees_conflict_output_skipped = \
    svntest.deeptrees.deep_trees_conflict_output_skipped
deep_trees_status_local_tree_del = \
    svntest.deeptrees.deep_trees_status_local_tree_del
deep_trees_status_local_leaf_edit = \
    svntest.deeptrees.deep_trees_status_local_leaf_edit

DeepTreesTestCase = svntest.deeptrees.DeepTreesTestCase

# Brief alias for os.path.join.
j = os.path.join
def tree_conflicts_on_switch_1_1(sbox):
  "tree conflicts 1.1: tree del, leaf edit on switch"

  sbox.build()

  # use case 1, as in notes/tree-conflicts/use-cases.txt
  # 1.1) local tree delete, incoming leaf edit

  expected_output = deep_trees_conflict_output.copy()
  expected_output.add({
    'DDD/D1/D2'         : Item(status='  ', treeconflict='U'),
    'DDD/D1/D2/D3'      : Item(status='  ', treeconflict='U'),
    'DDD/D1/D2/D3/zeta' : Item(status='  ', treeconflict='A'),
    'DD/D1/D2'          : Item(status='  ', treeconflict='U'),
    'DD/D1/D2/epsilon'  : Item(status='  ', treeconflict='A'),
    'DF/D1/beta'        : Item(status='  ', treeconflict='U'),
    'D/D1/delta'        : Item(status='  ', treeconflict='A'),
    'DDF/D1/D2'         : Item(status='  ', treeconflict='U'),
    'DDF/D1/D2/gamma'   : Item(status='  ', treeconflict='U')
  })

  # All six subtrees were locally deleted, so only the empty top-level
  # directories remain on disk.
  expected_disk = svntest.wc.State('', {
    'F'   : Item(),
    'D'   : Item(),
    'DF'  : Item(),
    'DD'  : Item(),
    'DDF' : Item(),
    'DDD' : Item(),
  })

  # The files delta, epsilon, and zeta are incoming additions, but since
  # they are all within locally deleted trees they should also be scheduled
  # for deletion.
  expected_status = deep_trees_status_local_tree_del.copy()
  expected_status.add({
    'D/D1/delta'        : Item(status='D '),
    'DD/D1/D2/epsilon'  : Item(status='D '),
    'DDD/D1/D2/D3/zeta' : Item(status='D '),
  })
  expected_status.tweak('', switched='S')

  # Update to the target rev.
  expected_status.tweak(wc_rev=3)

  # 'svn info' output expected for each conflict victim (regexes).
  expected_info = {
    'F/alpha' : {
      'Tree conflict' :
        '^local file delete, incoming file edit upon switch'
        + ' Source  left: .file.*/F/alpha@2'
        + ' Source right: .file.*/F/alpha@3$',
    },
    'DF/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir edit upon switch'
        + ' Source  left: .dir.*/DF/D1@2'
        + ' Source right: .dir.*/DF/D1@3$',
    },
    'DDF/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir edit upon switch'
        + ' Source  left: .dir.*/DDF/D1@2'
        + ' Source right: .dir.*/DDF/D1@3$',
    },
    'D/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir edit upon switch'
        + ' Source  left: .dir.*/D/D1@2'
        + ' Source right: .dir.*/D/D1@3$',
    },
    'DD/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir edit upon switch'
        + ' Source  left: .dir.*/DD/D1@2'
        + ' Source right: .dir.*/DD/D1@3$',
    },
    'DDD/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir edit upon switch'
        + ' Source  left: .dir.*/DDD/D1@2'
        + ' Source right: .dir.*/DDD/D1@3$',
    },
  }

  svntest.deeptrees.deep_trees_run_tests_scheme_for_switch(sbox,
    [ DeepTreesTestCase("local_tree_del_incoming_leaf_edit",
                        tree_del,
                        leaf_edit,
                        expected_output,
                        expected_disk,
                        expected_status,
                        expected_info = expected_info) ] )
@Issue(3334)
def tree_conflicts_on_switch_1_2(sbox):
  "tree conflicts 1.2: tree del, leaf del on switch"

  sbox.build()

  # 1.2) local tree delete, incoming leaf delete

  expected_output = deep_trees_conflict_output.copy()
  expected_output.add({
    'DD/D1/D2'        : Item(status='  ', treeconflict='D'),
    'DDF/D1/D2'       : Item(status='  ', treeconflict='U'),
    'DDF/D1/D2/gamma' : Item(status='  ', treeconflict='D'),
    'DDD/D1/D2'       : Item(status='  ', treeconflict='U'),
    'DDD/D1/D2/D3'    : Item(status='  ', treeconflict='D'),
    'DF/D1/beta'      : Item(status='  ', treeconflict='D'),
  })

  expected_disk = svntest.wc.State('', {
    'F'   : Item(),
    'D'   : Item(),
    'DF'  : Item(),
    'DD'  : Item(),
    'DDF' : Item(),
    'DDD' : Item(),
  })

  expected_status = deep_trees_status_local_tree_del.copy()

  # Expect the incoming leaf deletes to actually occur.  Even though they
  # are within (or in the case of F/alpha and D/D1 are the same as) the
  # trees locally scheduled for deletion we must still delete them and
  # update the scheduled for deletion items to the target rev.  Otherwise
  # once the conflicts are resolved we still have a mixed-rev WC we can't
  # commit without updating...which, you guessed it, raises tree conflicts
  # again, repeat ad infinitum - see issue #3334.
  #
  # Update to the target rev.
  expected_status.tweak(wc_rev=3)
  expected_status.tweak('F/alpha',
                        'D/D1',
                        status='! ', wc_rev=None)
  expected_status.tweak('', switched='S')
  # Remove the incoming deletes from status and disk.
  expected_status.remove('DD/D1/D2',
                         'DDD/D1/D2/D3',
                         'DDF/D1/D2/gamma',
                         'DF/D1/beta')

  # 'svn info' output expected for each conflict victim (regexes).
  expected_info = {
    'F/alpha' : {
      'Tree conflict' :
        '^local file delete, incoming file delete or move upon switch'
        + ' Source  left: .file.*/F/alpha@2'
        + ' Source right: .none.*(/F/alpha@3)?$',
    },
    'DF/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir edit upon switch'
        + ' Source  left: .dir.*/DF/D1@2'
        + ' Source right: .dir.*/DF/D1@3$',
    },
    'DDF/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir edit upon switch'
        + ' Source  left: .dir.*/DDF/D1@2'
        + ' Source right: .dir.*/DDF/D1@3$',
    },
    'D/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir delete or move upon switch'
        + ' Source  left: .dir.*/D/D1@2'
        + ' Source right: .none.*(/D/D1@3)?$',
    },
    'DD/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir edit upon switch'
        + ' Source  left: .dir.*/DD/D1@2'
        + ' Source right: .dir.*/DD/D1@3$',
    },
    'DDD/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir edit upon switch'
        + ' Source  left: .dir.*/DDD/D1@2'
        + ' Source right: .dir.*/DDD/D1@3$',
    },
  }

  svntest.deeptrees.deep_trees_run_tests_scheme_for_switch(sbox,
    [ DeepTreesTestCase("local_tree_del_incoming_leaf_del",
                        tree_del,
                        leaf_del,
                        expected_output,
                        expected_disk,
                        expected_status,
                        expected_info = expected_info) ] )
@Issue(3334)
def tree_conflicts_on_switch_2_1(sbox):
  "tree conflicts 2.1: leaf edit, tree del on switch"

  # use case 2, as in notes/tree-conflicts/use-cases.txt
  # 2.1) local leaf edit, incoming tree delete

  expected_output = deep_trees_conflict_output

  expected_disk = disk_after_leaf_edit.copy()

  expected_status = deep_trees_status_local_leaf_edit.copy()

  # The expectation on 'alpha' reflects partial progress on issue #3334.
  expected_status.tweak('D/D1',
                        'F/alpha',
                        'DD/D1',
                        'DF/D1',
                        'DDD/D1',
                        'DDF/D1',
                        status='A ', copied='+', wc_rev='-')
  # See the status of all the paths *under* the above six subtrees.  Only the
  # roots of the added subtrees show as schedule 'A', these child paths show
  # only that history is scheduled with the commit.
  expected_status.tweak(
    'DD/D1/D2',
    'DDD/D1/D2',
    'DDD/D1/D2/D3',
    'DF/D1/beta',
    'DDF/D1/D2',
    'DDF/D1/D2/gamma',
    copied='+', wc_rev='-')
  expected_status.tweak('', switched='S')

  # 'svn info' output expected for each conflict victim (regexes).
  expected_info = {
    'F/alpha' : {
      'Tree conflict' :
        '^local file edit, incoming file delete or move upon switch'
        + ' Source  left: .file.*/F/alpha@2'
        + ' Source right: .none.*(/F/alpha@3)?$',
    },
    'DF/D1' : {
      'Tree conflict' :
        '^local dir edit, incoming dir delete or move upon switch'
        + ' Source  left: .dir.*/DF/D1@2'
        + ' Source right: .none.*(/DF/D1@3)?$',
    },
    'DDF/D1' : {
      'Tree conflict' :
        '^local dir edit, incoming dir delete or move upon switch'
        + ' Source  left: .dir.*/DDF/D1@2'
        + ' Source right: .none.*(/DDF/D1@3)?$',
    },
    'D/D1' : {
      'Tree conflict' :
        '^local dir edit, incoming dir delete or move upon switch'
        + ' Source  left: .dir.*/D/D1@2'
        + ' Source right: .none.*(/D/D1@3)?$',
    },
    'DD/D1' : {
      'Tree conflict' :
        '^local dir edit, incoming dir delete or move upon switch'
        + ' Source  left: .dir.*/DD/D1@2'
        + ' Source right: .none.*(/DD/D1@3)?$',
    },
    'DDD/D1' : {
      'Tree conflict' :
        '^local dir edit, incoming dir delete or move upon switch'
        + ' Source  left: .dir.*/DDD/D1@2'
        + ' Source right: .none.*(/DDD/D1@3)?$',
    },
  }

  ### D/D1/delta is locally-added during leaf_edit. when tree_del executes,
  ### it will delete D/D1, and the switch reschedules local D/D1 for
  ### local-copy from its original revision. however, right now, we cannot
  ### denote that delta is a local-add rather than a child of that D/D1 copy.
  ### thus, it appears in the status output as a (M)odified child.
  svntest.deeptrees.deep_trees_run_tests_scheme_for_switch(sbox,
    [ DeepTreesTestCase("local_leaf_edit_incoming_tree_del",
                        leaf_edit,
                        tree_del,
                        expected_output,
                        expected_disk,
                        expected_status,
                        expected_info = expected_info) ] )
def tree_conflicts_on_switch_2_2(sbox):
  "tree conflicts 2.2: leaf del, tree del on switch"

  # 2.2) local leaf delete, incoming tree delete

  ### Current behaviour fails to show conflicts when deleting
  ### a directory tree that has modifications. (Will be solved
  ### when dirs_same_p() is implemented)
  expected_output = deep_trees_conflict_output

  expected_disk = svntest.wc.State('', {
    'DDF/D1/D2' : Item(),
    'F'         : Item(),
    'D'         : Item(),
    'DF/D1'     : Item(),
    'DD/D1'     : Item(),
    'DDD/D1/D2' : Item(),
  })

  # Start from a pristine deep-trees status and add the root and F/alpha
  # entries that the virginal template lacks.
  expected_status = svntest.deeptrees.deep_trees_virginal_state.copy()
  expected_status.add({'' : Item(),
                       'F/alpha' : Item()})
  expected_status.tweak(contents=None, status='  ', wc_rev=3)
  expected_status.tweak('', switched='S')

  # Expect the incoming tree deletes and the local leaf deletes to mean
  # that all deleted paths are *really* gone, not simply scheduled for
  # deletion.
  expected_status.tweak('DD/D1', 'DF/D1', 'DDF/D1', 'DDD/D1',
                        status='A ', copied='+', treeconflict='C',
                        wc_rev='-')
  expected_status.tweak('DDF/D1/D2', 'DDD/D1/D2',
                        copied='+', wc_rev='-')
  expected_status.tweak('DD/D1/D2',  'DF/D1/beta', 'DDD/D1/D2/D3',
                        'DDF/D1/D2/gamma',
                        status='D ', copied='+', wc_rev='-')
  expected_status.tweak('F/alpha', 'D/D1',
                        status='! ', treeconflict='C', wc_rev=None)

  # 'svn info' output expected for each conflict victim (regexes).
  expected_info = {
    'F/alpha' : {
      'Tree conflict' :
        '^local file delete, incoming file delete or move upon switch'
        + ' Source  left: .file.*/F/alpha@2'
        + ' Source right: .none.*(/F/alpha@3)?$',
    },
    'DF/D1' : {
      'Tree conflict' :
        '^local dir edit, incoming dir delete or move upon switch'
        + ' Source  left: .dir.*/DF/D1@2'
        + ' Source right: .none.*(/DF/D1@3)?$',
    },
    'DDF/D1' : {
      'Tree conflict' :
        '^local dir edit, incoming dir delete or move upon switch'
        + ' Source  left: .dir.*/DDF/D1@2'
        + ' Source right: .none.*(/DDF/D1@3)?$',
    },
    'D/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir delete or move upon switch'
        + ' Source  left: .dir.*/D/D1@2'
        + ' Source right: .none.*(/D/D1@3)?$',
    },
    'DD/D1' : {
      'Tree conflict' :
        '^local dir edit, incoming dir delete or move upon switch'
        + ' Source  left: .dir.*/DD/D1@2'
        + ' Source right: .none.*(/DD/D1@3)?$',
    },
    'DDD/D1' : {
      'Tree conflict' :
        '^local dir edit, incoming dir delete or move upon switch'
        + ' Source  left: .dir.*/DDD/D1@2'
        + ' Source right: .none.*(/DDD/D1@3)?$',
    },
  }

  svntest.deeptrees.deep_trees_run_tests_scheme_for_switch(sbox,
    [ DeepTreesTestCase("local_leaf_del_incoming_tree_del",
                        leaf_del,
                        tree_del,
                        expected_output,
                        expected_disk,
                        expected_status,
                        expected_info = expected_info) ] )
def tree_conflicts_on_switch_3(sbox):
  "tree conflicts 3: tree del, tree del on switch"

  # use case 3, as in notes/tree-conflicts/use-cases.txt
  # local tree delete, incoming tree delete

  expected_output = deep_trees_conflict_output

  expected_disk = svntest.wc.State('', {
    'F'   : Item(),
    'D'   : Item(),
    'DF'  : Item(),
    'DD'  : Item(),
    'DDF' : Item(),
    'DDD' : Item(),
  })

  expected_status = deep_trees_status_local_tree_del.copy()
  expected_status.tweak('', switched='S')

  # Expect the incoming tree deletes and the local tree deletes to mean
  # that all deleted paths are *really* gone, not simply scheduled for
  # deletion.
  expected_status.tweak('F/alpha',
                        'D/D1',
                        'DD/D1',
                        'DF/D1',
                        'DDD/D1',
                        'DDF/D1',
                        status='! ', wc_rev=None)
  # Remove from expected status and disk everything below the deleted paths.
  expected_status.remove('DD/D1/D2',
                         'DF/D1/beta',
                         'DDD/D1/D2',
                         'DDD/D1/D2/D3',
                         'DDF/D1/D2',
                         'DDF/D1/D2/gamma',)

  # 'svn info' output expected for each conflict victim (regexes).
  expected_info = {
    'F/alpha' : {
      'Tree conflict' :
        '^local file delete, incoming file delete or move upon switch'
        + ' Source  left: .file.*/F/alpha@2'
        + ' Source right: .none.*(/F/alpha@3)?$',
    },
    'DF/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir delete or move upon switch'
        + ' Source  left: .dir.*/DF/D1@2'
        + ' Source right: .none.*(/DF/D1@3)?$',
    },
    'DDF/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir delete or move upon switch'
        + ' Source  left: .dir.*/DDF/D1@2'
        + ' Source right: .none.*(/DDF/D1@3)?$',
    },
    'D/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir delete or move upon switch'
        + ' Source  left: .dir.*/D/D1@2'
        + ' Source right: .none.*(/D/D1@3)?$',
    },
    'DD/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir delete or move upon switch'
        + ' Source  left: .dir.*/DD/D1@2'
        + ' Source right: .none.*(/DD/D1@3)?$',
    },
    'DDD/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir delete or move upon switch'
        + ' Source  left: .dir.*/DDD/D1@2'
        + ' Source right: .none.*(/DDD/D1@3)?$',
    },
  }

  svntest.deeptrees.deep_trees_run_tests_scheme_for_switch(sbox,
    [ DeepTreesTestCase("local_tree_del_incoming_tree_del",
                        tree_del,
                        tree_del,
                        expected_output,
                        expected_disk,
                        expected_status,
                        expected_info = expected_info) ] )
def copy_with_switched_subdir(sbox):
  "copy directory with switched subdir"

  sbox.build()
  wc_dir = sbox.wc_dir
  D = sbox.ospath('A/D')
  G = os.path.join(D, 'G')

  E_url = sbox.repo_url + '/A/B/E'
  R = sbox.ospath('R')

  state = svntest.actions.get_virginal_state(wc_dir, 1)

  # Verify before switching
  svntest.actions.run_and_verify_status(wc_dir, state)

  # Switch A/D/G to A/B/E: pi/rho/tau are replaced by alpha/beta.
  svntest.actions.run_and_verify_svn(None, [], 'switch',
                                     '--ignore-ancestry', E_url, G)

  state.tweak('A/D/G', switched='S')
  state.remove('A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau')
  state.add({
    'A/D/G/alpha' : Item(status='  ', wc_rev=1),
    'A/D/G/beta'  : Item(status='  ', wc_rev=1),
    })
  svntest.actions.run_and_verify_status(wc_dir, state)

  # And now copy A/D and everything below it to R
  svntest.actions.run_and_verify_svn(None, [], 'cp', D, R)

  # The copy should carry the *switched* contents of G (alpha/beta).
  state.add({
    'R'         : Item(status='A ', copied='+', wc_rev='-'),
    'R/gamma'   : Item(status='  ', copied='+', wc_rev='-'),
    'R/G/alpha' : Item(status='  ', copied='+', wc_rev='-'),
    'R/G/beta'  : Item(status='  ', copied='+', wc_rev='-'),
    'R/H'       : Item(status='  ', copied='+', wc_rev='-'),
    'R/H/chi'   : Item(status='  ', copied='+', wc_rev='-'),
    'R/H/omega' : Item(status='  ', copied='+', wc_rev='-'),
    'R/H/psi'   : Item(status='  ', copied='+', wc_rev='-'),
    'R/G'       : Item(status='A ', copied='+', wc_rev='-'),
    })
  svntest.actions.run_and_verify_status(wc_dir, state)

  sbox.simple_commit(message='Commit added folder')

  # Additional test, it should commit to R/G/alpha.
  svntest.main.run_svn(None, 'up', wc_dir)
  svntest.main.file_append(sbox.ospath('R/G/alpha'), "apple")
  sbox.simple_commit(message='Commit changed file')

  # Checkout working copy to verify result
  svntest.main.safe_rmtree(wc_dir, 1)
  svntest.actions.run_and_verify_svn(None, [],
                                     'checkout',
                                     sbox.repo_url, wc_dir)

  # Switch A/D/G again to recreate state
  svntest.actions.run_and_verify_svn(None, [], 'switch',
                                     '--ignore-ancestry', E_url, G)

  # Clear the statuses
  state.tweak(status='  ', copied=None, wc_rev='3', entry_status=None)
  # But reset the switched state
  state.tweak('A/D/G', switched='S')

  svntest.actions.run_and_verify_status(wc_dir, state)
@Issue(3871)
def up_to_old_rev_with_subtree_switched_to_root(sbox):
  "up to old rev with subtree switched to root"

  sbox.build()
  wc_dir = sbox.wc_dir

  # Some paths we'll care about.
  A_path = sbox.ospath('A')
  branch_path = sbox.ospath('branch')

  # Starting with a vanilla greek tree, create a branch of A, switch
  # that branch to the root of the repository, then update the WC to
  # r1.
  svntest.actions.run_and_verify_svn(None, [], 'copy', A_path,
                                     branch_path)
  svntest.actions.run_and_verify_svn(None, [], 'ci', wc_dir,
                                     '-m', 'Create a branch')
  svntest.actions.run_and_verify_svn(None, [], 'sw', sbox.repo_url,
                                     branch_path, '--ignore-ancestry')

  # Now update the WC to r1.  (Issue #3871: this used to fail when a
  # subtree was switched to the repository root.)
  svntest.actions.run_and_verify_svn(None, [], 'up', '-r1', wc_dir)
def different_node_kind(sbox):
  "switch to a different node kind"

  sbox.build(read_only = True)
  os.chdir(sbox.wc_dir)
  sbox.wc_dir = ''

  pristine_disk = svntest.main.greek_state
  pristine_status = svntest.actions.get_virginal_state(sbox.wc_dir, 1)
  expected_disk = pristine_disk.copy()
  expected_status = pristine_status.copy()

  # Switch the node at REL_PATH (currently a file) to the directory at
  # REL_URL, updating the shared expected_disk/expected_status in place.
  def switch_to_dir(sbox, rel_url, rel_path):
    full_url = sbox.repo_url + '/' + rel_url
    full_path = sbox.ospath(rel_path)

    expected_disk.remove(rel_path)
    expected_disk.add({ rel_path : pristine_disk.desc[rel_url] })
    expected_disk.add_state(rel_path, pristine_disk.subtree(rel_url))

    expected_status.tweak(rel_path, switched='S')
    expected_status.add_state(rel_path, pristine_status.subtree(rel_url))

    svntest.actions.run_and_verify_switch(sbox.wc_dir, full_path, full_url,
                                          None, expected_disk, expected_status,
                                          [], False,
                                          '--ignore-ancestry')
    svntest.actions.run_and_verify_svn(None, [], 'info', full_path)
    if not os.path.isdir(full_path):
      raise svntest.Failure

  # Switch the node at REL_PATH (currently a directory) to the file at
  # REL_URL, updating the shared expected_disk/expected_status in place.
  def switch_to_file(sbox, rel_url, rel_path):
    full_url = sbox.repo_url + '/' + rel_url
    full_path = sbox.ospath(rel_path)

    expected_disk.remove_subtree(rel_path)
    expected_disk.add({ rel_path : pristine_disk.desc[rel_url] })

    expected_status.remove_subtree(rel_path)
    expected_status.add({ rel_path : pristine_status.desc[rel_url] })
    expected_status.tweak(rel_path, switched='S')

    svntest.actions.run_and_verify_switch(sbox.wc_dir, full_path, full_url,
                                          None, expected_disk, expected_status,
                                          [], False,
                                          '--ignore-ancestry')
    svntest.actions.run_and_verify_svn(None, [], 'info', full_path)
    if not os.path.isfile(full_path):
      raise svntest.Failure

  # Switch two files to dirs and two dirs to files.
  # 'A/C' is an empty dir; 'A/D/G' is a non-empty dir.
  switch_to_dir(sbox, 'A/C', 'iota')
  switch_to_dir(sbox, 'A/D/G', 'A/D/gamma')
  switch_to_file(sbox, 'iota', 'A/C')
  switch_to_file(sbox, 'A/D/gamma', 'A/D/G')
@Issue(3332, 3333)
def switch_to_spaces(sbox):
  "switch to a directory with spaces in its name"

  sbox.build()
  wc_dir = sbox.wc_dir
  repo_url = sbox.repo_url

  # Paths are normalized in the command processing, so %20 is equivalent to ' '
  svntest.actions.run_and_verify_svn(None, [],
                                     'cp', repo_url + '/A',
                                     repo_url + '/A%20with space',
                                     '-m', '')

  svntest.actions.run_and_verify_svn(None, [],
                                     'mv', repo_url + '/A%20with space',
                                     repo_url + '/A with%20more spaces',
                                     '-m', '')

  expected_status = svntest.actions.get_virginal_state(wc_dir, 3)
  expected_status.tweak('A', switched='S')
  expected_status.tweak('', 'iota', wc_rev=1)

  # Switch A to the renamed branch, mixing literal spaces and %20 escapes
  # in the target URL on purpose.
  svntest.actions.run_and_verify_switch(sbox.wc_dir, sbox.ospath('A'),
                                        repo_url + '/A%20with more%20spaces',
                                        None, None, expected_status)
def switch_across_replacement(sbox):
  "switch across a node replacement"
  sbox.build()
  os.chdir(sbox.wc_dir)
  sbox.wc_dir = ''

  # replacement
  sbox.simple_rm('A/mu')
  sbox.simple_append('A/mu', "This is the file 'mu'.\n", truncate=True)
  sbox.simple_add('A/mu')
  sbox.simple_commit()  # r2

  # When 'switch' of a dir brings in a replacement of a child file with no
  # textual difference and ignoring ancestry, the switch doesn't report any
  # incoming change at all, (and so won't raise a tree conflict if there is
  # a local mod).  'update' on the other hand does report the replacement
  # as expected.

  # This test FAILs when using a Subversion 1.0-1.7 svnserve.

  expected_output = svntest.wc.State(sbox.wc_dir, {
    'A/mu' : Item(status='A ', prev_status='D '),
    })
  # Cross the replacement in both directions via 'update' first...
  svntest.actions.run_and_verify_update(sbox.wc_dir,
                                        expected_output, None, None,
                                        [], False,
                                        '-r1')
  svntest.actions.run_and_verify_update(sbox.wc_dir,
                                        expected_output, None, None,
                                        [], False,
                                        '-r2')
  # ...then via 'switch', which should report the same replacement.
  svntest.actions.run_and_verify_switch(sbox.wc_dir, sbox.ospath('A'), '^/A',
                                        expected_output, None, None,
                                        [], False,
                                        '-r1')
@Issue(1975)
def switch_keywords(sbox):
  "switch and svn:keywords"
  sbox.build()
  gamma_path = sbox.ospath('A/D/gamma')
  psi_path = sbox.ospath('A/D/H/psi')

  # Set up two files carrying the URL keyword, committed as r2.
  sbox.simple_propset('svn:keywords', 'URL', 'A/D/gamma')
  svntest.main.file_write(gamma_path, "$URL$\n")
  sbox.simple_propset('svn:keywords', 'URL', 'A/D/H/psi')
  svntest.main.file_write(psi_path, "$URL$\n")
  sbox.simple_commit()

  expected_disk = svntest.main.greek_state.copy()
  expected_disk.tweak('A/D/gamma',
                      contents="$URL: %s/A/D/gamma $\n" % sbox.repo_url)
  expected_disk.tweak('A/D/H/psi',
                      contents="$URL: %s/A/D/H/psi $\n" % sbox.repo_url)

  svntest.actions.run_and_verify_update(sbox.wc_dir,
                                        None, expected_disk, None)
  sbox.simple_copy('A', 'A_copy')
  sbox.simple_commit()
  sbox.simple_update()

  # Next, we're going to switch A to A_copy, and expect keywords
  # in the switched files gamma and psi to be updated accordingly.
  expected_disk.add({
    'A_copy/D/H/chi'   : Item(contents="This is the file 'chi'.\n"),
    'A_copy/D/H/psi'   : Item(contents="$URL: %s/A_copy/D/H/psi $\n"
                              % sbox.repo_url),
    'A_copy/D/H/omega' : Item(contents="This is the file 'omega'.\n"),
    'A_copy/D/G/pi'    : Item(contents="This is the file 'pi'.\n"),
    'A_copy/D/G/tau'   : Item(contents="This is the file 'tau'.\n"),
    'A_copy/D/G/rho'   : Item(contents="This is the file 'rho'.\n"),
    'A_copy/D/gamma'   : Item(contents="$URL: %s/A_copy/D/gamma $\n"
                              % sbox.repo_url),
    'A_copy/B/F'       : Item(),
    'A_copy/B/E/alpha' : Item(contents="This is the file 'alpha'.\n"),
    'A_copy/B/E/beta'  : Item(contents="This is the file 'beta'.\n"),
    'A_copy/B/lambda'  : Item(contents="This is the file 'lambda'.\n"),
    'A_copy/mu'        : Item(contents="This is the file 'mu'.\n"),
    'A_copy/C'         : Item(),
  })
  # update expected URL for switched gamma
  expected_disk.tweak('A/D/gamma',
                      contents="$URL: %s/A_copy/D/gamma $\n" % sbox.repo_url)

  # leave gamma unmodified, locally modify psi
  svntest.main.file_write(psi_path, "$URL$\nnew line\n")
  # update expected URL for switched psi
  expected_disk.tweak('A/D/H/psi',
                      contents="$URL: %s/A_copy/D/H/psi $\nnew line\n"
                      % sbox.repo_url)

  expected_status = svntest.actions.get_virginal_state(sbox.wc_dir, 3)
  expected_status.add({
    'A_copy'           : Item(status='  ', wc_rev='3'),
    'A_copy/mu'        : Item(status='  ', wc_rev='3'),
    'A_copy/D'         : Item(status='  ', wc_rev='3'),
    'A_copy/D/H'       : Item(status='  ', wc_rev='3'),
    'A_copy/D/H/psi'   : Item(status='  ', wc_rev='3'),
    'A_copy/D/H/chi'   : Item(status='  ', wc_rev='3'),
    'A_copy/D/H/omega' : Item(status='  ', wc_rev='3'),
    'A_copy/D/gamma'   : Item(status='  ', wc_rev='3'),
    'A_copy/D/G'       : Item(status='  ', wc_rev='3'),
    'A_copy/D/G/rho'   : Item(status='  ', wc_rev='3'),
    'A_copy/D/G/tau'   : Item(status='  ', wc_rev='3'),
    'A_copy/D/G/pi'    : Item(status='  ', wc_rev='3'),
    'A_copy/B'         : Item(status='  ', wc_rev='3'),
    'A_copy/B/E'       : Item(status='  ', wc_rev='3'),
    'A_copy/B/E/alpha' : Item(status='  ', wc_rev='3'),
    'A_copy/B/E/beta'  : Item(status='  ', wc_rev='3'),
    'A_copy/B/F'       : Item(status='  ', wc_rev='3'),
    'A_copy/B/lambda'  : Item(status='  ', wc_rev='3'),
    'A_copy/C'         : Item(status='  ', wc_rev='3'),
  })
  expected_status.tweak('A', switched='S')
  expected_status.tweak('A/D/H/psi', status='M ')

  # both gamma and psi should have update URLs after the switch
  svntest.actions.run_and_verify_switch(sbox.wc_dir, sbox.ospath('A'), '^/A_copy',
                                        None, expected_disk, expected_status)
@Issue(4524)
def switch_moves(sbox):
  "switch moves on wc checkpoint"

  sbox.build()

  # Locally move A/B to B, delete the rest of A, and branch that shape.
  sbox.simple_move('A/B', 'B')
  sbox.simple_rm('A')

  branch_url = sbox.repo_url + '/branch'

  svntest.actions.run_and_verify_svn(None, [],
                                     'cp', sbox.wc_dir, branch_url,
                                     '-m', '')

  expected_disk = svntest.wc.State('', {
    'B/E/alpha' : Item(contents="This is the file 'alpha'.\n"),
    'B/E/beta'  : Item(contents="This is the file 'beta'.\n"),
    'B/lambda'  : Item(contents="This is the file 'lambda'.\n"),
    'B/F'       : Item(),
    'iota'      : Item(contents="This is the file 'iota'.\n"),
  })

  expected_status = svntest.wc.State(sbox.wc_dir, {
    ''          : Item(status='  ', wc_rev='2'),
    'B'         : Item(status='R ', copied='+', treeconflict='C', wc_rev='-'),
    'B/lambda'  : Item(status='  ', copied='+', wc_rev='-'),
    'B/F'       : Item(status='  ', copied='+', wc_rev='-'),
    'B/E'       : Item(status='  ', copied='+', wc_rev='-'),
    'B/E/beta'  : Item(status='  ', copied='+', wc_rev='-'),
    'B/E/alpha' : Item(status='  ', copied='+', wc_rev='-'),
    'A'         : Item(status='! ', treeconflict='C'),
    'iota'      : Item(status='  ', wc_rev='2'),
  })

  # In Subversion 1.8 this scenario causes an Sqlite row not found error.
  # It would be nice if we could handle the tree conflict more intelligently,
  # as the working copy matches the incoming change.
  svntest.actions.run_and_verify_switch(sbox.wc_dir, sbox.ospath(''), branch_url,
                                        None, expected_disk, expected_status)
########################################################################
# Run the tests
# list all tests here, starting with None:
# list all tests here, starting with None:
# (the leading None keeps the harness' 1-based test numbering aligned
# with list indices)
test_list = [ None,
              routine_switching,
              commit_switched_things,
              full_update,
              full_rev_update,
              update_switched_things,
              rev_update_switched_things,
              log_switched_file,
              delete_subdir,
              file_dir_file,
              nonrecursive_switching,
              failed_anchor_is_target,
              bad_intermediate_urls,
              obstructed_switch,
              commit_mods_below_switch,
              refresh_read_only_attribute,
              switch_change_repos_root,
              forced_switch,
              forced_switch_failures,
              switch_scheduled_add,
              mergeinfo_switch_elision,
              switch_with_obstructing_local_adds,
              switch_with_depth,
              switch_to_dir_with_peg_rev,
              switch_urls_with_spaces,
              switch_to_dir_with_peg_rev2,
              switch_to_root,
              tolerate_local_mods,
              tree_conflicts_on_switch_1_1,
              tree_conflicts_on_switch_1_2,
              tree_conflicts_on_switch_2_1,
              tree_conflicts_on_switch_2_2,
              tree_conflicts_on_switch_3,
              copy_with_switched_subdir,
              up_to_old_rev_with_subtree_switched_to_root,
              different_node_kind,
              switch_to_spaces,
              switch_across_replacement,
              switch_keywords,
              switch_moves,
             ]
if __name__ == '__main__':
  # run_tests() calls sys.exit() itself, so control never returns here.
  svntest.main.run_tests(test_list)
  # NOTREACHED
### End of file.
# (extraction artifact removed: stray dataset-metadata row)
#
# Subversion is a tool for revision control.
# See http://subversion.apache.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
# General modules
import shutil, re, os
# Our testing module
import svntest
from svntest import verify, actions, main, deeptrees
# (abbreviation)
Skip = svntest.testcase.Skip_deco
SkipUnless = svntest.testcase.SkipUnless_deco
XFail = svntest.testcase.XFail_deco
Issues = svntest.testcase.Issues_deco
Issue = svntest.testcase.Issue_deco
Wimp = svntest.testcase.Wimp_deco
Item = svntest.wc.StateItem
from svntest.main import SVN_PROP_MERGEINFO, server_has_mergeinfo
from svntest.deeptrees import do_routine_switching, commit_routine_switching, \
get_routine_disk_state, get_routine_status_state
######################################################################
# Tests
#
#----------------------------------------------------------------------
def routine_switching(sbox):
  "test some basic switching operations"

  sbox.build(read_only = True)

  # Setup (and verify) some switched things
  do_routine_switching(sbox.wc_dir, sbox.repo_url, 1)
#----------------------------------------------------------------------
def commit_switched_things(sbox):
  "commits after some basic switching operations"

  sbox.build()
  wc_dir = sbox.wc_dir

  # Setup some switched things (don't bother verifying)
  do_routine_switching(wc_dir, sbox.repo_url, 0)

  # Commit some stuff (and verify it -- final arg 1)
  commit_routine_switching(wc_dir, 1)
def full_update(sbox):
  """Update a working copy that contains switched things.

  Commits are made from a backup working copy, then a full update of the
  original WC must pull each change into both its real location and the
  switched location that points at the same repository node.
  """
  sbox.build()
  wc_dir = sbox.wc_dir

  # Setup some switched things (don't bother verifying)
  do_routine_switching(wc_dir, sbox.repo_url, 0)

  # Copy wc_dir to a backup location
  wc_backup = sbox.add_wc_path('backup')
  svntest.actions.duplicate_dir(wc_dir, wc_backup)

  # Commit some stuff from the backup WC (don't bother verifying)
  commit_routine_switching(wc_backup, 0)

  # NOTE: the unused path variables that used to be computed here
  # (iota_path, gamma_path, ...) have been removed.

  # Every r2 change shows up twice: at its canonical path and at the
  # switched path aliasing the same repository node.
  expected_output = svntest.wc.State(wc_dir, {
    'iota' : Item(status='U '),
    'A/D/gamma' : Item(status='U '),
    'A/B/pi' : Item(status='U '),
    'A/B/Z' : Item(status='A '),
    'A/B/Z/zeta' : Item(status='A '),
    'A/D/G/pi' : Item(status='U '),
    'A/D/G/Z' : Item(status='A '),
    'A/D/G/Z/zeta' : Item(status='A '),
    })

  expected_disk = get_routine_disk_state(wc_dir)
  expected_disk.tweak('iota', contents="This is the file 'gamma'.\napple")
  expected_disk.tweak('A/D/gamma', contents="This is the file 'gamma'.\napple")
  expected_disk.tweak('A/B/pi', contents="This is the file 'pi'.\nmelon")
  expected_disk.tweak('A/D/G/pi', contents="This is the file 'pi'.\nmelon")
  expected_disk.add({
    'A/B/Z' : Item(),
    'A/B/Z/zeta' : Item(contents="This is the file 'zeta'.\n"),
    'A/D/G/Z' : Item(),
    'A/D/G/Z/zeta' : Item(contents="This is the file 'zeta'.\n"),
    })

  # Everything moves to r2; 'iota' and 'A/B' keep their switched marker.
  expected_status = get_routine_status_state(wc_dir)
  expected_status.tweak(wc_rev=2)
  expected_status.add({
    'A/D/G/Z' : Item(status=' ', wc_rev=2),
    'A/D/G/Z/zeta' : Item(status=' ', wc_rev=2),
    'A/B/Z' : Item(status=' ', wc_rev=2),
    'A/B/Z/zeta' : Item(status=' ', wc_rev=2),
    })
  expected_status.tweak('iota', 'A/B', switched='S')

  svntest.actions.run_and_verify_update(wc_dir,
                                        expected_output,
                                        expected_disk,
                                        expected_status)
def full_rev_update(sbox):
  """Reverse update (to r1) a working copy that contains switched things.

  After updating the whole WC to HEAD (r2), an update back to r1 must
  roll back the r2 file edits and delete the r2-added directories, at
  both the switched and the unswitched locations.
  """
  sbox.build()
  wc_dir = sbox.wc_dir

  # Setup some switched things (don't bother verifying)
  do_routine_switching(wc_dir, sbox.repo_url, 0)

  # Commit some stuff (don't bother verifying)
  commit_routine_switching(wc_dir, 0)

  # Update the whole WC to HEAD (r2) first.
  svntest.main.run_svn(None, 'up', wc_dir)

  # NOTE: the unused path variables that used to be computed here
  # (iota_path, gamma_path, ...) have been removed.

  # Now reverse-update to r1: edits become 'U', r2-added dirs become 'D'.
  expected_output = svntest.wc.State(wc_dir, {
    'iota' : Item(status='U '),
    'A/D/gamma' : Item(status='U '),
    'A/B/pi' : Item(status='U '),
    'A/B/Z' : Item(status='D '),
    'A/D/G/pi' : Item(status='U '),
    'A/D/G/Z' : Item(status='D '),
    })

  # After the rollback, disk and status match the routine (r1) state,
  # except that 'iota' and 'A/B' are still switched.
  expected_disk = get_routine_disk_state(wc_dir)
  expected_status = get_routine_status_state(wc_dir)
  expected_status.tweak('iota', 'A/B', switched='S')

  svntest.actions.run_and_verify_update(wc_dir,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], True,
                                        '-r', '1', wc_dir)
def update_switched_things(sbox):
  """Update some switched targets directly (rather than the whole WC)
  and verify that the r2 changes land only in those targets."""
  sbox.build()
  wc_dir = sbox.wc_dir

  # Set up some switched things (don't bother verifying).
  do_routine_switching(wc_dir, sbox.repo_url, 0)

  # Copy wc_dir to a backup location
  wc_backup = sbox.add_wc_path('backup')
  svntest.actions.duplicate_dir(wc_dir, wc_backup)

  # Commit some stuff (don't bother verifying)
  commit_routine_switching(wc_backup, 0)

  # The two switched targets that will be updated directly.
  iota_path = sbox.ospath('iota')
  B_path = sbox.ospath('A/B')

  # Only the directly-updated switched subtrees receive the r2 changes.
  expected_output = svntest.wc.State(wc_dir, {
    'iota' : Item(status='U '),
    'A/B/pi' : Item(status='U '),
    'A/B/Z' : Item(status='A '),
    'A/B/Z/zeta' : Item(status='A '),
    })

  expected_disk = get_routine_disk_state(wc_dir)
  expected_disk.tweak('iota', contents="This is the file 'gamma'.\napple")
  expected_disk.tweak('A/B/pi', contents="This is the file 'pi'.\nmelon")
  expected_disk.add({
    'A/B/Z' : Item(),
    'A/B/Z/zeta' : Item("This is the file 'zeta'.\n"),
    })

  # Only the updated targets (and their children) move to r2; the rest
  # of the working copy stays at r1.
  expected_status = get_routine_status_state(wc_dir)
  expected_status.tweak('iota', 'A/B', switched='S')
  expected_status.tweak('A/B', 'A/B/pi', 'A/B/rho', 'A/B/tau', 'iota',
                        wc_rev=2)
  expected_status.add({
    'A/B/Z' : Item(status=' ', wc_rev=2),
    'A/B/Z/zeta' : Item(status=' ', wc_rev=2),
    })

  svntest.actions.run_and_verify_update(wc_dir,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], False,
                                        B_path,
                                        iota_path)
def rev_update_switched_things(sbox):
  """Reverse update (to r1) some switched targets directly and verify
  that only those targets are rolled back."""
  sbox.build()
  wc_dir = sbox.wc_dir

  # Set up some switched things (don't bother verifying).
  do_routine_switching(wc_dir, sbox.repo_url, 0)

  # Commit some stuff (don't bother verifying)
  commit_routine_switching(wc_dir, 0)

  # The two switched targets that will be reverse-updated directly.
  iota_path = sbox.ospath('iota')
  B_path = sbox.ospath('A/B')

  # Bring the whole WC to HEAD (r2) first.
  svntest.main.run_svn(None, 'up', wc_dir)

  # Only the two named targets see the rollback.
  expected_output = svntest.wc.State(wc_dir, {
    'iota' : Item(status='U '),
    'A/B/pi' : Item(status='U '),
    'A/B/Z' : Item(status='D '),
    })

  # The unswitched copies of the r2 changes (under A/D) keep their r2
  # content on disk, since this update didn't touch them.
  expected_disk = get_routine_disk_state(wc_dir)
  expected_disk.tweak('A/D/gamma', contents="This is the file 'gamma'.\napple")
  expected_disk.tweak('A/D/G/pi', contents="This is the file 'pi'.\nmelon")
  expected_disk.add({
    'A/D/G/Z' : Item(),
    'A/D/G/Z/zeta' : Item("This is the file 'zeta'.\n"),
    })

  # Everything is at r2 except the directly reverse-updated targets,
  # which are back at r1.
  expected_status = get_routine_status_state(wc_dir)
  expected_status.tweak(wc_rev=2)
  expected_status.tweak('iota', 'A/B', switched='S')
  expected_status.tweak('A/B', 'A/B/pi', 'A/B/rho', 'A/B/tau', 'iota',
                        wc_rev=1)
  expected_status.add({
    'A/D/G/Z' : Item(status=' ', wc_rev=2),
    'A/D/G/Z/zeta' : Item(status=' ', wc_rev=2),
    })

  svntest.actions.run_and_verify_update(wc_dir,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], True,
                                        '-r', '1',
                                        B_path,
                                        iota_path)
def log_switched_file(sbox):
  "show logs for a switched file"

  sbox.build()
  wc_dir = sbox.wc_dir

  # Set up some switched things (don't bother verifying).
  do_routine_switching(wc_dir, sbox.repo_url, 0)

  # Commit a property change on the switched file 'iota'.
  iota_path = sbox.ospath('iota')
  svntest.main.run_svn(None, 'ps', 'x', 'x', iota_path)
  svntest.main.run_svn(None,
                       'ci', '-m',
                       'set prop on switched iota',
                       iota_path)

  # The log of the switched file must mention that commit.
  exit_code, output, error = svntest.main.run_svn(None, 'log', iota_path)
  if not any("set prop on switched iota" in line for line in output):
    raise svntest.Failure
#----------------------------------------------------------------------
def delete_subdir(sbox):
  """Switch A to a branch (A2) on which a subdir (B/F) has been deleted;
  the switch must delete the subdir from the working copy."""
  sbox.build()
  wc_dir = sbox.wc_dir

  A_path = sbox.ospath('A')
  A_url = sbox.repo_url + '/A'
  A2_url = sbox.repo_url + '/A2'
  A2_B_F_url = sbox.repo_url + '/A2/B/F'

  # r2: branch A to A2.
  svntest.actions.run_and_verify_svn(['Committing transaction...\n',
                                      'Committed revision 2.\n'], [],
                                     'cp', '-m', 'make copy', A_url, A2_url)

  # r3: delete A2/B/F on the branch.
  svntest.actions.run_and_verify_svn(['Committing transaction...\n',
                                      'Committed revision 3.\n'], [],
                                     'rm', '-m', 'delete subdir', A2_B_F_url)

  expected_output = svntest.wc.State(wc_dir, {
    'A/B/F' : Item(status='D '),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_disk.remove('A/B/F')
  expected_status = svntest.actions.get_virginal_state(wc_dir, 3)
  expected_status.tweak('A', switched='S')
  expected_status.remove('A/B/F')
  # Only the switch target subtree moves to r3; the WC root and iota
  # stay at r1.
  expected_status.tweak('', 'iota', wc_rev=1)

  # Used to fail with a 'directory not locked' error for A/B/F
  svntest.actions.run_and_verify_switch(wc_dir, A_path, A2_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], False,
                                        '--ignore-ancestry')
#----------------------------------------------------------------------
# Issue 1532: Switch a file to a dir: can't switch it back to the file
@XFail()
@Issue(1532)
def file_dir_file(sbox):
  """Issue #1532: switch a file to a directory, then back to the file.

  The second switch is the step the issue is about; the test is marked
  XFail.  (An unused 'wc_dir' local was removed.)
  """
  sbox.build(read_only = True)

  file_path = sbox.ospath('iota')
  file_url = sbox.repo_url + '/iota'
  dir_url = sbox.repo_url + '/A/C'

  # Switch the file to a directory; the path must now be a directory.
  svntest.actions.run_and_verify_svn(None, [], 'switch',
                                     '--ignore-ancestry', dir_url, file_path)
  if not os.path.isdir(file_path):
    raise svntest.Failure

  # Switch it back; the path must be a regular file again.
  svntest.actions.run_and_verify_svn(None, [], 'switch',
                                     '--ignore-ancestry', file_url, file_path)
  if not os.path.isfile(file_path):
    raise svntest.Failure
def nonrecursive_switching(sbox):
  """Check that 'switch -N' is non-recursive: the directory itself and
  its immediate file children switch, but its subdirectories do not."""
  sbox.build()
  wc1_dir = sbox.wc_dir
  wc2_dir = os.path.join(wc1_dir, 'wc2')
  trunk_url = sbox.repo_url + '/A'
  branch_url = sbox.repo_url + '/branch'
  version1_url = branch_url + '/version1'
  wc1_new_file = os.path.join(wc1_dir, 'branch', 'version1', 'newfile')
  wc2_new_file = os.path.join(wc2_dir, 'newfile')
  wc2_mu_file = os.path.join(wc2_dir, 'mu')
  wc2_B_dir = os.path.join(wc2_dir, 'B')
  wc2_C_dir = os.path.join(wc2_dir, 'C')
  wc2_D_dir = os.path.join(wc2_dir, 'D')

  # Check out the trunk as a second working copy, create the branch
  # (branch/version1 = a copy of trunk), and add a new file on the
  # branch via wc1.
  svntest.main.run_svn(None, 'co', trunk_url, wc2_dir)
  svntest.main.run_svn(None,
                       'mkdir', '-m', '', branch_url)
  svntest.main.run_svn(None,
                       'cp', '-m', '', trunk_url, version1_url)
  svntest.main.run_svn(None,
                       'up', wc1_dir)
  svntest.main.file_append(wc1_new_file, "This is the file 'newfile'.\n")
  svntest.main.run_svn(None, 'add', wc1_new_file)
  sbox.simple_commit()

  # Switch the second working copy to the branch, non-recursively.
  svntest.actions.run_and_verify_svn(None, [], 'switch', '-N',
                                     '--ignore-ancestry', version1_url, wc2_dir)

  # The subdirectories must still point at the trunk URLs.
  expected_infos = [
    { 'URL' : '.*/A/B$' },
    { 'URL' : '.*/A/C$' },
    { 'URL' : '.*/A/D$' },
  ]
  svntest.actions.run_and_verify_info(expected_infos,
                                      wc2_B_dir, wc2_C_dir, wc2_D_dir)

  # The immediate file children must point at the branch URLs, and the
  # branch-added 'newfile' must be present.
  expected_infos = [
    { 'URL' : '.*/branch/version1/mu$' },
    { 'URL' : '.*/branch/version1/newfile$' },
  ]
  svntest.actions.run_and_verify_info(expected_infos,
                                      wc2_mu_file, wc2_new_file)
def failed_anchor_is_target(sbox):
  """Switch a dir (A/D/H) to a URL (A/D/G) whose incoming 'psi' dir
  collides with a locally modified 'psi' file; the collision must become
  a tree conflict that can be resolved afterwards.

  (The final status-verification line of this test had been truncated to
  'est.actions...' in the source; it is restored here.)
  """
  sbox.build()
  wc_dir = sbox.wc_dir

  # r2: create A/D/G/psi as a *directory* in the repository.
  G_url = sbox.repo_url + '/A/D/G'
  G_psi_url = G_url + '/psi'
  svntest.actions.run_and_verify_svn(['Committing transaction...\n',
                                      'Committed revision 2.\n'], [],
                                     'mkdir', '-m', 'log msg', G_psi_url)

  # Locally modify the file A/D/H/psi.
  H_path = sbox.ospath('A/D/H')
  psi_path = os.path.join(H_path, 'psi')
  svntest.main.file_append(psi_path, "more text")

  # Switch H to G: the incoming dir 'psi' collides with the locally
  # modified file 'psi', producing a tree conflict.
  svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [],
                                     'switch', '--ignore-ancestry',
                                     G_url, H_path)

  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.tweak('A/D/H', switched='S', wc_rev=2)
  expected_status.tweak('A/D/H/psi', status='R ', copied='+',
                        wc_rev='-', treeconflict='C')
  expected_status.remove('A/D/H/chi', 'A/D/H/omega')
  expected_status.add({
    'A/D/H/pi' : Item(status=' ', wc_rev=2),
    'A/D/H/tau' : Item(status=' ', wc_rev=2),
    'A/D/H/rho' : Item(status=' ', wc_rev=2),
    })
  svntest.actions.run_and_verify_status(wc_dir, expected_status)

  # The switch itself must have gone through: H now points at G's URL.
  expected_infos = [
    { 'URL' : '.*' + G_url + '$' },
  ]
  svntest.actions.run_and_verify_info(expected_infos, H_path)

  # Resolve the tree conflict and re-check: the conflict marker on psi
  # must be gone.
  svntest.actions.run_and_verify_resolved([psi_path])
  expected_status.tweak('A/D/H/psi', treeconflict=None)
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
def bad_intermediate_urls(sbox):
  """Switch the WC root to a URL (A/C) containing a subdir named 'A'
  that collides with an unversioned file A/Z in the working copy; verify
  the tree conflict and that the WC can recover from it.

  (The 'def' header and first body lines of this test were lost from the
  source; they are reconstructed here — verify against upstream.)
  """
  sbox.build()
  wc_dir = sbox.wc_dir
  url = sbox.repo_url

  A = sbox.ospath('A')
  A_Z = sbox.ospath('A/Z')
  url_A_C = url + '/A/C'
  url_A_C_A = url + '/A/C/A'
  url_A_C_A_Z = url + '/A/C/A/Z'

  # First, make an extra subdirectory in C to match one in the root, plus
  # another one inside of that.
  svntest.actions.run_and_verify_svn(['Committing transaction...\n',
                                      'Committed revision 2.\n'], [],
                                     'mkdir', '-m', 'log msg',
                                     url_A_C_A, url_A_C_A_Z)

  # Now, we'll drop a conflicting path under the root.
  svntest.main.file_append(A_Z, 'Look, Mom, a ... tree conflict.')

  # Test recipe:
  #   svn switch url/A/C wc_dir
  #   svn info A
  #   check that we can recover from the tree conflict
  #   rm A/Z
  #   svn up

  # The switch replaces the whole tree with A/C's (empty) content; the
  # unversioned A/Z obstructs the incoming A/C/A/Z and tree-conflicts.
  expected_output = svntest.wc.State(wc_dir, {
    'A/mu' : Item(status='D '),
    'A/Z' : Item(status=' ', treeconflict='C'),
    'A/C' : Item(status='D '),
    'A/B' : Item(status='D '),
    'A/D' : Item(status='D '),
    'iota' : Item(status='D '),
    })

  expected_disk = svntest.main.greek_state.copy()
  expected_disk.remove('iota', 'A/B', 'A/B/E', 'A/B/E/beta', 'A/B/E/alpha',
    'A/B/F', 'A/B/lambda', 'A/D', 'A/D/G', 'A/D/G/rho', 'A/D/G/pi',
    'A/D/G/tau', 'A/D/H', 'A/D/H/psi', 'A/D/H/omega', 'A/D/H/chi',
    'A/D/gamma', 'A/mu', 'A/C')
  expected_disk.add({
    'A/Z' : Item(contents="Look, Mom, a ... tree conflict."),
    })

  expected_status = actions.get_virginal_state(wc_dir, 2)
  expected_status.remove('iota', 'A/B', 'A/B/E', 'A/B/E/beta', 'A/B/E/alpha',
    'A/B/F', 'A/B/lambda', 'A/D', 'A/D/G', 'A/D/G/rho', 'A/D/G/pi',
    'A/D/G/tau', 'A/D/H', 'A/D/H/psi', 'A/D/H/omega', 'A/D/H/chi',
    'A/D/gamma', 'A/mu', 'A/C')
  expected_status.add({
    'A/Z' : Item(status='D ', treeconflict='C', wc_rev=2),
    })

  actions.run_and_verify_switch(wc_dir, wc_dir, url_A_C, expected_output,
                                expected_disk, expected_status,
                                [], False,
                                '--ignore-ancestry')

  # 'A' must now point at the repository's A/C/A.
  expected_infos = [
    { 'URL' : '.*/A/C/A$' },
  ]
  svntest.actions.run_and_verify_info(expected_infos, A)

  # Recover: remove the conflicting file, clear the conflict, update.
  os.remove(A_Z)
  svntest.main.run_svn(None, 'revert', A_Z)

  expected_output = svntest.wc.State(wc_dir, {
    })
  expected_disk.tweak('A/Z', contents=None)
  expected_status.tweak(status=' ', wc_rev='2')
  expected_status.tweak('A/Z', treeconflict=None)
  actions.run_and_verify_update(wc_dir, expected_output, expected_disk,
                                expected_status)
def obstructed_switch(sbox):
  """Switch A/B/E to a branch (A/B/Esave) while an unversioned file
  obstructs an incoming add; verify the resulting tree conflict.

  (The 'def' header of this test had been truncated to 'itch(sbox):' in
  the source; it is reconstructed here.)

  Test recipe:
    svn cp url/A/B/E url/A/B/Esave        (r2)
    svn rm A/B/E/alpha ; svn ci           (r3)
    echo "hello" >> A/B/E/alpha           (unversioned obstruction)
    svn switch url/A/B/Esave A/B/E
    svn status
    svn info A/B/E/alpha
  """
  sbox.build()
  wc_dir = sbox.wc_dir
  url = sbox.repo_url

  A_B_E = sbox.ospath('A/B/E')
  A_B_E_alpha = sbox.ospath('A/B/E/alpha')
  url_A_B_E = url + '/A/B/E'
  url_A_B_Esave = url + '/A/B/Esave'

  # r2: branch E to Esave (the branch still contains alpha).
  expected_stdout = [
    'Committing transaction...\n',
    'Committed revision 2.\n',
  ]
  actions.run_and_verify_svn2(expected_stdout, [], 0, 'cp', '-m',
                              'msgcopy', url_A_B_E, url_A_B_Esave)

  # r3: delete alpha on E itself.
  expected_stdout = ['D ' + A_B_E_alpha + '\n']
  actions.run_and_verify_svn2(expected_stdout, [], 0, 'rm',
                              A_B_E_alpha)
  expected_output = svntest.wc.State(wc_dir, {
    'A/B/E/alpha' : Item(verb='Deleting'),
    })
  expected_status = actions.get_virginal_state(wc_dir, 1)
  expected_status.remove('A/B/E/alpha')
  actions.run_and_verify_commit(wc_dir, expected_output, expected_status)

  # Drop an unversioned file where alpha used to be; it obstructs the
  # alpha that the switch to Esave wants to add back.
  main.file_append(A_B_E_alpha, 'hello')

  expected_output = svntest.wc.State(wc_dir, {
    'A/B/E/alpha' : Item(status=' ', treeconflict='C'),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_disk.tweak('A/B/E/alpha', contents='hello')
  expected_status.add({
    'A/B/E/alpha' : Item(status='D ', treeconflict='C', wc_rev=3),
    })
  expected_status.tweak('A/B/E', wc_rev='3', switched='S')
  expected_status.tweak('A/B/E/beta', wc_rev='3')
  actions.run_and_verify_switch(wc_dir, A_B_E, url_A_B_Esave,
                                expected_output, expected_disk,
                                expected_status,
                                [], False, '--ignore-ancestry')

  # Unquiet status also reports the repository-only Esave tree.
  expected_status.add({
    'A/B/Esave' : Item(status=' '),
    'A/B/Esave/beta' : Item(status=' '),
    'A/B/Esave/alpha' : Item(status=' '),
    })
  actions.run_and_verify_unquiet_status(wc_dir, expected_status)

  # 'svn info' on the obstructed path must describe the tree conflict.
  expected_stdout = verify.RegexOutput(
    ".*local file unversioned, incoming file add upon switch",
    match_all=False)
  actions.run_and_verify_svn2(expected_stdout, [], 0, 'info',
                              A_B_E_alpha)
def commit_mods_below_switch(sbox):
  """Commit property modifications on a switched dir (A/C) and a normal
  dir (A/D) in a single commit naming both targets."""
  sbox.build()
  wc_dir = sbox.wc_dir

  C_path = sbox.ospath('A/C')
  B_url = sbox.repo_url + '/A/B'

  # Switch A/C to A/B; C gains B's children.
  expected_output = svntest.wc.State(wc_dir, {
    'A/C/E' : Item(status='A '),
    'A/C/E/alpha' : Item(status='A '),
    'A/C/E/beta' : Item(status='A '),
    'A/C/F' : Item(status='A '),
    'A/C/lambda' : Item(status='A '),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_disk.add({
    'A/C/E' : Item(),
    'A/C/E/alpha' : Item(contents="This is the file 'alpha'.\n"),
    'A/C/E/beta' : Item(contents="This is the file 'beta'.\n"),
    'A/C/F' : Item(),
    'A/C/lambda' : Item(contents="This is the file 'lambda'.\n"),
    })
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.tweak('A/C', switched='S')
  expected_status.add({
    'A/C/E' : Item(status=' ', wc_rev=1),
    'A/C/E/alpha' : Item(status=' ', wc_rev=1),
    'A/C/E/beta' : Item(status=' ', wc_rev=1),
    'A/C/F' : Item(status=' ', wc_rev=1),
    'A/C/lambda' : Item(status=' ', wc_rev=1),
    })
  svntest.actions.run_and_verify_switch(wc_dir, C_path, B_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [],
                                        False, '--ignore-ancestry')

  # Set the same property on the switched dir and an unswitched one.
  D_path = sbox.ospath('A/D')
  svntest.actions.run_and_verify_svn(None, [],
                                     'propset', 'x', 'x', C_path, D_path)
  expected_status.tweak('A/C', 'A/D', status=' M')
  svntest.actions.run_and_verify_status(wc_dir, expected_status)

  # A single commit naming both targets must send both propchanges.
  expected_output = svntest.wc.State(wc_dir, {
    'A/C' : Item(verb='Sending'),
    'A/D' : Item(verb='Sending'),
    })
  expected_status.tweak('A/C', 'A/D', status=' ', wc_rev=2)
  svntest.actions.run_and_verify_commit(wc_dir,
                                        expected_output, expected_status,
                                        [], C_path, D_path)
def refresh_read_only_attribute(sbox):
  """Switch a file with svn:needs-lock set to a branch where the
  property is not set, and check that the file becomes writable."""
  # This test doesn't work when run as root: root can write to a file
  # regardless of its permission bits.  Since that's normal
  # behavior, just skip the test.
  if os.name == 'posix':
    if os.geteuid() == 0:
      raise svntest.Skip('Test doesn\'t work as uid 0')
  sbox.build()
  wc_dir = sbox.wc_dir

  # r2: branch A to A-branch *before* svn:needs-lock is set on A/mu.
  url = sbox.repo_url + '/A'
  branch_url = sbox.repo_url + '/A-branch'
  svntest.actions.run_and_verify_svn(['Committing transaction...\n',
                                      'Committed revision 2.\n'], [],
                                     'cp', '-m', 'svn:needs-lock not set',
                                     url, branch_url)

  # r3: set svn:needs-lock on A/mu and commit it.
  A_path = sbox.ospath('A')
  mu_path = os.path.join(A_path, 'mu')
  svntest.actions.run_and_verify_svn(None, [],
                                     'ps', 'svn:needs-lock', '1', mu_path)
  expected_output = svntest.wc.State(wc_dir, {
    'A/mu' : Item(verb='Sending'),
    })
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.tweak('A/mu', wc_rev=3)
  svntest.actions.run_and_verify_commit(wc_dir,
                                        expected_output, expected_status,
                                        [], mu_path)

  # The commit must have made the file read-only.
  if os.access(mu_path, os.W_OK):
    raise svntest.Failure("'%s' expected to be read-only after having had "
                          "its svn:needs-lock property set" % mu_path)

  # Switch A to the branch, where svn:needs-lock is not set on mu.
  expected_output = svntest.wc.State(wc_dir, {
    'A/mu' : Item(status=' U'),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_status = svntest.actions.get_virginal_state(wc_dir, 3)
  expected_status.tweak('', wc_rev=1)
  expected_status.tweak('iota', wc_rev=1)
  expected_status.tweak('A', switched='S')
  svntest.actions.run_and_verify_switch(wc_dir, A_path, branch_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [],
                                        False, '--ignore-ancestry')

  # The switch must have refreshed (cleared) the read-only attribute.
  if not os.access(mu_path, os.W_OK):
    raise svntest.Failure("'%s' expected to be writable after being switched "
                          "to a branch on which its svn:needs-lock property "
                          "is not set" % mu_path)
def switch_change_repos_root(sbox):
  """A switch that changes the repository root URL must be rejected and
  must leave the working copy unharmed."""
  sbox.build()

  wc_dir = sbox.wc_dir
  repo_url = sbox.repo_url
  other_repo_url = repo_url

  # Strip trailing slashes and add something bogus to that other URL.
  # (Fixes an infinite loop: the stripped value was assigned to a
  # misspelled 'other_repos_url', so the loop condition never changed
  # when the URL ended with '/'.)
  while other_repo_url[-1] == '/':
    other_repo_url = other_repo_url[:-1]
  other_repo_url = other_repo_url + "_bogus"

  other_A_url = other_repo_url + "/A"
  A_wc_dir = sbox.ospath('A')

  # Test 1: A switch that changes to a non-existing repo shouldn't work.
  expected_err = ".*Unable to open repository.*|.*Could not open.*|"\
                 ".*Could not find.*|.*No repository found.*"
  svntest.actions.run_and_verify_svn(None,
                                     expected_err,
                                     'switch', '--ignore-ancestry',
                                     other_A_url, A_wc_dir)

  # Test 2: A switch to a URL in a different (real) repository must be
  # rejected because the repository UUIDs differ.
  other_repo_dir, other_repo_url = sbox.add_repo_path('other')
  other_A_url = other_repo_url + "/A"
  svntest.main.create_repos(other_repo_dir)
  svntest.actions.run_and_verify_svn(None,
                                     ".*UUID.*",
                                     'switch', '--ignore-ancestry',
                                     other_A_url, A_wc_dir)

  # Make sure we didn't break the WC.
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  svntest.actions.run_and_verify_status(wc_dir, expected_status)
def forced_switch(sbox):
  """Switch A/B/F to A/D with --force while unversioned items that match
  incoming paths already exist; matching items are kept ('E'), and
  extra unversioned items survive untouched."""
  sbox.build(read_only = True)

  # Create unversioned obstructions that match incoming content:
  # a dir G, files gamma and G/tau (copies of the real ones), plus an
  # obstructing G/pi with different content.
  G_path = sbox.ospath('A/B/F/G')
  os.mkdir(G_path)
  shutil.copyfile(sbox.ospath('A/D/gamma'),
                  sbox.ospath('A/B/F/gamma'))
  shutil.copyfile(sbox.ospath('A/D/G/tau'),
                  sbox.ospath('A/B/F/G/tau'))
  pi_path = sbox.ospath('A/B/F/G/pi')
  svntest.main.file_write(pi_path,
                          "This is the OBSTRUCTING file 'pi'.\n")
  # Unversioned items with no incoming counterpart: dir I, file upsilon.
  I_path = sbox.ospath('A/B/F/I')
  os.mkdir(I_path)
  upsilon_path = os.path.join(G_path, 'upsilon')
  svntest.main.file_write(upsilon_path,
                          "This is the unversioned file 'upsilon'.\n")

  # Obstructions matching incoming paths are reported as 'E' (existed);
  # genuinely new incoming paths as 'A'.
  expected_output = svntest.wc.State(sbox.wc_dir, {
    "A/B/F/gamma" : Item(status='E '),
    "A/B/F/G" : Item(status='E '),
    "A/B/F/G/pi" : Item(status='E '),
    "A/B/F/G/rho" : Item(status='A '),
    "A/B/F/G/tau" : Item(status='E '),
    "A/B/F/H" : Item(status='A '),
    "A/B/F/H/chi" : Item(status='A '),
    "A/B/F/H/omega" : Item(status='A '),
    "A/B/F/H/psi" : Item(status='A '),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_disk.add({
    "A/B/F/gamma" : Item("This is the file 'gamma'.\n"),
    "A/B/F/G" : Item(),
    "A/B/F/G/pi" : Item("This is the OBSTRUCTING file 'pi'.\n"),
    "A/B/F/G/rho" : Item("This is the file 'rho'.\n"),
    "A/B/F/G/tau" : Item("This is the file 'tau'.\n"),
    "A/B/F/G/upsilon" : Item("This is the unversioned file 'upsilon'.\n"),
    "A/B/F/H" : Item(),
    "A/B/F/H/chi" : Item("This is the file 'chi'.\n"),
    "A/B/F/H/omega" : Item("This is the file 'omega'.\n"),
    "A/B/F/H/psi" : Item("This is the file 'psi'.\n"),
    "A/B/F/I" : Item(),
    })
  expected_status = svntest.actions.get_virginal_state(sbox.wc_dir, 1)
  expected_status.tweak('A/B/F', switched='S')
  expected_status.add({
    "A/B/F/gamma" : Item(status=' ', wc_rev=1),
    "A/B/F/G" : Item(status=' ', wc_rev=1),
    # The divergent obstructing content of pi shows up as a local mod.
    "A/B/F/G/pi" : Item(status='M ', wc_rev=1),
    "A/B/F/G/rho" : Item(status=' ', wc_rev=1),
    "A/B/F/G/tau" : Item(status=' ', wc_rev=1),
    "A/B/F/H" : Item(status=' ', wc_rev=1),
    "A/B/F/H/chi" : Item(status=' ', wc_rev=1),
    "A/B/F/H/omega" : Item(status=' ', wc_rev=1),
    "A/B/F/H/psi" : Item(status=' ', wc_rev=1),
    })

  # Do the forced switch of A/B/F to A/D.
  F_path = sbox.ospath('A/B/F')
  AD_url = sbox.repo_url + '/A/D'
  svntest.actions.run_and_verify_switch(sbox.wc_dir, F_path, AD_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status, [], False,
                                        '--force', '--ignore-ancestry')
def forced_switch_failures(sbox):
  """Exercise forced-switch cases that must raise tree conflicts or be
  skipped: (1) incoming file obstructed by an unversioned dir,
  (2) incoming dir obstructed by an unversioned file, (3) incoming dir
  obstructed by a separate (nested) working copy.  Finally remove all
  obstructions and verify the WC recovers on update."""
  # # Add a directory to obstruct a file.
  # mkdir A/B/F/pi
  #
  # # Add a file to obstruct a directory.
  # echo "The file 'H'" > A/C/H
  #
  # # Test three cases where forced switch should cause a tree conflict
  #
  # # 1) A forced switch that tries to add a file when an unversioned
  # #    directory of the same name already exists.  (Currently fails)
  # svn switch --force url/A/D A/C
  #
  # # 2) A forced switch that tries to add a dir when a file of the same
  # #    name already exists. (Tree conflict)
  # svn switch --force url/A/D/G A/B/F
  # svn info A/B/F/pi
  #
  # # 3) A forced update that tries to add a directory when a versioned
  # #    directory of the same name already exists.
  #
  # # Make dir A/D/H/I in repos.
  # svn mkdir -m "Log message" url/A/D/H/I
  #
  # # Make A/D/G/I and co A/D/H/I into it.
  # mkdir A/D/G/I
  # svn co url/A/D/H/I A/D/G/I
  #
  # # Try the forced switch.  A/D/G/I obstructs the dir A/D/G/I coming
  # # from the repos, causing an error.
  # svn switch --force url/A/D/H A/D/G
  #
  # # Delete all three obstructions and finish the update.
  # rm -rf A/D/G/I
  # rm A/B/F/pi
  # rm A/C/H
  #
  # # A/B/F is switched to A/D/G
  # # A/C is switched to A/D
  # # A/D/G is switched to A/D/H
  # svn up
  sbox.build()
  wc_dir = sbox.wc_dir
  url = sbox.repo_url

  A_B_F = sbox.ospath('A/B/F')
  A_B_F_pi = sbox.ospath('A/B/F/pi')
  A_C = sbox.ospath('A/C')
  A_C_H = sbox.ospath('A/C/H')
  A_D_G = sbox.ospath('A/D/G')
  A_D_G_I = sbox.ospath('A/D/G/I')
  url_A_D = url + '/A/D'
  url_A_D_G = url + '/A/D/G'
  url_A_D_H = url + '/A/D/H'
  url_A_D_H_I = url + '/A/D/H/I'

  # Create obstructions: a dir where a file will come in, and a file
  # where a dir will come in.
  os.makedirs(A_B_F_pi)
  main.file_write(A_C_H, "The file 'H'\n")

  # Case 1: forced switch of A/C to A/D; the file obstruction A/C/H
  # turns into a tree conflict.
  expected_output = svntest.wc.State(wc_dir, {
    'A/C/G' : Item(status='A '),
    'A/C/G/pi' : Item(status='A '),
    'A/C/G/rho' : Item(status='A '),
    'A/C/G/tau' : Item(status='A '),
    'A/C/gamma' : Item(status='A '),
    'A/C/H' : Item(status=' ', treeconflict='C'),
    'A/C/H/psi' : Item(status=' ', treeconflict='A'),
    'A/C/H/omega' : Item(status=' ', treeconflict='A'),
    'A/C/H/chi' : Item(status=' ', treeconflict='A'),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_disk.add({
    'A/C/gamma' : Item(contents="This is the file 'gamma'.\n"),
    'A/C/G' : Item(),
    'A/C/G/pi' : Item(contents="This is the file 'pi'.\n"),
    'A/C/G/rho' : Item(contents="This is the file 'rho'.\n"),
    'A/C/G/tau' : Item(contents="This is the file 'tau'.\n"),
    'A/C/H' : Item(contents="The file 'H'\n"),
    'A/B/F/pi' : Item(),
    })
  expected_status = actions.get_virginal_state(wc_dir, 1)
  expected_status.add({
    'A/C/G' : Item(status=' ', wc_rev='1'),
    'A/C/G/rho' : Item(status=' ', wc_rev='1'),
    'A/C/G/tau' : Item(status=' ', wc_rev='1'),
    'A/C/G/pi' : Item(status=' ', wc_rev='1'),
    'A/C/H' : Item(status='D ', treeconflict='C', wc_rev='1'),
    'A/C/H/psi' : Item(status='D ', wc_rev='1'),
    'A/C/H/omega' : Item(status='D ', wc_rev='1'),
    'A/C/H/chi' : Item(status='D ', wc_rev='1'),
    'A/C/gamma' : Item(status=' ', wc_rev='1'),
    })
  expected_status.tweak('A/C', switched='S')
  actions.run_and_verify_switch(wc_dir, A_C, url_A_D, expected_output,
                                expected_disk, expected_status, [], False,
                                '--force',
                                '--ignore-ancestry')

  # Case 2: forced switch of A/B/F to A/D/G; the dir obstruction
  # A/B/F/pi turns into a tree conflict.
  expected_output = svntest.wc.State(wc_dir, {
    'A/B/F/rho' : Item(status='A '),
    'A/B/F/pi' : Item(status=' ', treeconflict='C'),
    'A/B/F/tau' : Item(status='A '),
    })
  expected_disk.add({
    'A/B/F/rho' : Item(contents="This is the file 'rho'.\n"),
    'A/B/F/tau' : Item(contents="This is the file 'tau'.\n"),
    })
  expected_status.add({
    'A/B/F/tau' : Item(status=' ', wc_rev='1'),
    'A/B/F/pi' : Item(status='D ', treeconflict='C', wc_rev='1'),
    'A/B/F/rho' : Item(status=' ', wc_rev='1'),
    })
  expected_status.tweak('A/B/F', switched='S')
  actions.run_and_verify_switch(wc_dir, A_B_F, url_A_D_G, expected_output,
                                expected_disk, expected_status, [], False,
                                '--force',
                                '--ignore-ancestry')

  # 'svn info' must describe the conflict on the obstructed path.
  expected_stdout = verify.ExpectedOutput(
    'Tree conflict: local dir unversioned, incoming file add upon switch\n',
    match_all=False)
  actions.run_and_verify_svn2(expected_stdout, [], 0, 'info',
                              A_B_F_pi)

  # Case 3 setup: r2 creates A/D/H/I in the repository.
  expected_stdout = verify.UnorderedOutput([
    'Committing transaction...\n',
    'Committed revision 2.\n',
  ])
  actions.run_and_verify_svn2(expected_stdout, [], 0, 'mkdir',
                              '-m', 'Log message', url_A_D_H_I)

  # Check out A/D/H/I into a fresh dir A/D/G/I -> a nested working copy
  # obstructing the incoming versioned dir.
  os.makedirs(A_D_G_I)
  expected_output = svntest.wc.State(wc_dir, {})
  expected_disk.add({
    'A/D/G/I' : Item(),
    })

  exit_code, so, se = svntest.actions.run_and_verify_svn(
    ['Checked out revision 2.\n'], [],
    "co", url_A_D_H_I, A_D_G_I)

  # Forced switch of A/D/G to A/D/H must skip the nested working copy.
  expected_output = svntest.wc.State(wc_dir, {
    'A/D/G/chi' : Item(status='A '),
    'A/D/G/tau' : Item(status='D '),
    'A/D/G/omega' : Item(status='A '),
    'A/D/G/psi' : Item(status='A '),
    'A/D/G/I' : Item(verb='Skipped'),
    'A/D/G/rho' : Item(status='D '),
    'A/D/G/pi' : Item(status='D '),
    })
  actions.run_and_verify_switch(wc_dir, A_D_G, url_A_D_H, expected_output,
                                None, None, [], False,
                                '--force', '--ignore-ancestry')

  # Delete all three obstructions and clear the recorded conflicts.
  main.safe_rmtree(A_D_G_I)
  main.safe_rmtree(A_B_F_pi)
  os.remove(A_C_H)
  svntest.main.run_svn(None, 'resolved', A_C_H)
  svntest.main.run_svn(None, 'revert', A_B_F_pi)

  # A final update brings everything to r2, restoring the previously
  # obstructed content.
  expected_output = svntest.wc.State(wc_dir, {
    'A/C/H/I' : Item(status='A '),
    'A/D/G/I' : Item(status='A '),
    'A/D/H/I' : Item(status='A '),
    })
  expected_disk.remove('A/D/G/tau', 'A/D/G/rho', 'A/D/G/pi')
  expected_disk.add({
    'A/D/H/I' : Item(),
    'A/D/G/omega' : Item(contents="This is the file 'omega'.\n"),
    'A/D/G/psi' : Item(contents="This is the file 'psi'.\n"),
    'A/D/G/chi' : Item(contents="This is the file 'chi'.\n"),
    'A/C/H/I' : Item(),
    'A/C/H/omega' : Item(contents="This is the file 'omega'.\n"),
    'A/C/H/psi' : Item(contents="This is the file 'psi'.\n"),
    'A/C/H/chi' : Item(contents="This is the file 'chi'.\n"),
    })
  expected_disk.tweak('A/C/H', contents=None)
  expected_disk.tweak('A/B/F/pi', contents="This is the file 'pi'.\n")
  expected_status.remove('A/D/G/tau', 'A/D/G/rho', 'A/D/G/pi')
  expected_status.add({
    'A/D/G/omega' : Item(status=' ', wc_rev='2'),
    'A/D/G/I' : Item(status=' ', wc_rev='2'),
    'A/D/G/psi' : Item(status=' ', wc_rev='2'),
    'A/D/G/chi' : Item(status=' ', wc_rev='2'),
    'A/D/H/I' : Item(status=' ', wc_rev='2'),
    'A/C/H/psi' : Item(status=' ', wc_rev='2'),
    'A/C/H/omega' : Item(status=' ', wc_rev='2'),
    'A/C/H/chi' : Item(status=' ', wc_rev='2'),
    'A/C/H/I' : Item(status=' ', wc_rev='2'),
    })
  expected_status.tweak(wc_rev='2', status=' ')
  expected_status.tweak('A/B/F/pi', 'A/C/H', treeconflict=None)
  expected_status.tweak('A/D/G', switched='S')
  svntest.main.run_svn(None, 'revert', '-R', sbox.ospath('A/C/H'))
  actions.run_and_verify_update(wc_dir, expected_output, expected_disk,
                                expected_status)
def switch_with_obstructing_local_adds(sbox):
  """Switch A/B/F to A/D when scheduled-add items in F collide with
  incoming paths; the collisions must become tree conflicts."""
  sbox.build(read_only = True)

  # Create and schedule for addition items that collide with incoming
  # content: dir G (with copies of tau and an obstructing pi), file
  # gamma, and dir I (no incoming counterpart).
  G_path = sbox.ospath('A/B/F/G')
  os.mkdir(G_path)
  gamma_copy_path = sbox.ospath('A/B/F/gamma')
  shutil.copyfile(sbox.ospath('A/D/gamma'),
                  gamma_copy_path)
  shutil.copyfile(sbox.ospath('A/D/G/tau'),
                  sbox.ospath('A/B/F/G/tau'))
  pi_path = sbox.ospath('A/B/F/G/pi')
  svntest.main.file_write(pi_path,
                          "This is the OBSTRUCTING file 'pi'.\n")
  I_path = sbox.ospath('A/B/F/I')
  os.mkdir(I_path)
  # upsilon stays unversioned inside the scheduled-add dir G.
  upsilon_path = os.path.join(G_path, 'upsilon')
  svntest.main.file_write(upsilon_path,
                          "This is the unversioned file 'upsilon'.\n")
  svntest.actions.run_and_verify_svn(None, [],
                                     'add', G_path, I_path,
                                     gamma_copy_path)

  # Incoming gamma and G collide with the local adds -> tree conflicts;
  # H comes in cleanly.
  expected_output = svntest.wc.State(sbox.wc_dir, {
    "A/B/F/gamma" : Item(status=' ', treeconflict='C'),
    "A/B/F/G" : Item(status=' ', treeconflict='C'),
    'A/B/F/G/tau' : Item(status=' ', treeconflict='A'),
    'A/B/F/G/rho' : Item(status=' ', treeconflict='A'),
    'A/B/F/G/pi' : Item(status=' ', treeconflict='A'),
    "A/B/F/H" : Item(status='A '),
    "A/B/F/H/chi" : Item(status='A '),
    "A/B/F/H/omega" : Item(status='A '),
    "A/B/F/H/psi" : Item(status='A '),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_disk.add({
    "A/B/F/gamma" : Item("This is the file 'gamma'.\n"),
    "A/B/F/G" : Item(),
    "A/B/F/G/pi" : Item("This is the OBSTRUCTING file 'pi'.\n"),
    "A/B/F/G/tau" : Item("This is the file 'tau'.\n"),
    "A/B/F/G/upsilon" : Item("This is the unversioned file 'upsilon'.\n"),
    "A/B/F/H" : Item(),
    "A/B/F/H/chi" : Item("This is the file 'chi'.\n"),
    "A/B/F/H/omega" : Item("This is the file 'omega'.\n"),
    "A/B/F/H/psi" : Item("This is the file 'psi'.\n"),
    "A/B/F/I" : Item(),
    })
  # The conflicted local adds appear as replacements ('R'/copied) on top
  # of the incoming items; H's tree is plain r1 content.
  expected_status = svntest.actions.get_virginal_state(sbox.wc_dir, 1)
  expected_status.tweak('A/B/F', switched='S')
  expected_status.add({
    'A/B/F/gamma' : Item(status='R ', treeconflict='C', wc_rev='1'),
    'A/B/F/G' : Item(status='R ', treeconflict='C', wc_rev='1'),
    'A/B/F/G/pi' : Item(status='A ', wc_rev='-', entry_status='R ', entry_rev='1'),
    'A/B/F/G/tau' : Item(status='A ', wc_rev='-', entry_status='R ', entry_rev='1'),
    'A/B/F/G/upsilon' : Item(status='A ', wc_rev='-', entry_rev='0'),
    'A/B/F/G/rho' : Item(status='D ', wc_rev='1'),
    'A/B/F/H' : Item(status=' ', wc_rev='1'),
    'A/B/F/H/chi' : Item(status=' ', wc_rev='1'),
    'A/B/F/H/omega' : Item(status=' ', wc_rev='1'),
    'A/B/F/H/psi' : Item(status=' ', wc_rev='1'),
    'A/B/F/I' : Item(status='A ', wc_rev='-', entry_rev='0'),
    })

  # Do the switch of A/B/F to A/D.
  F_path = sbox.ospath('A/B/F')
  D_url = sbox.repo_url + '/A/D'
  svntest.actions.run_and_verify_switch(sbox.wc_dir, F_path, D_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], False,
                                        '--ignore-ancestry')
def switch_scheduled_add(sbox):
  "switch a file scheduled for addition"

  sbox.build(read_only = True)
  wc_dir = sbox.wc_dir

  stub_path = sbox.ospath('stub_file')
  missing_path = sbox.ospath('nodo')
  iota_url = sbox.repo_url + '/iota'

  # Create an empty file and schedule it for addition.
  svntest.main.file_append(stub_path, "")
  svntest.actions.run_and_verify_svn(None, [], 'add', stub_path)

  # A node that exists only as a scheduled add has no repository URL
  # yet, so switching it must fail.
  svntest.actions.run_and_verify_svn(
    None,
    "svn: E200007: Cannot switch '.*file' "
    "because it is not in the repository yet",
    'switch', '--ignore-ancestry', iota_url, stub_path)

  # Switching a path that is not versioned at all must fail too.
  svntest.actions.run_and_verify_svn(
    None,
    "svn: E155010: The node '.*nodo' was not",
    'switch', '--ignore-ancestry', iota_url, missing_path)
@SkipUnless(server_has_mergeinfo)
def mergeinfo_switch_elision(sbox):
  """Check that switch does not elide a node's explicit mergeinfo.

  Creates two branches of A/B, merges the same revisions into one
  branch's root and into a subtree of the other, then performs several
  switches and verifies that locally recorded mergeinfo which happens
  to duplicate the switch target's mergeinfo is left in place.
  """

  sbox.build()
  wc_dir = sbox.wc_dir

  # Some paths we'll care about below.
  lambda_path = sbox.ospath('A/B_COPY_1/lambda')
  B_COPY_1_path = sbox.ospath('A/B_COPY_1')
  B_COPY_2_path = sbox.ospath('A/B_COPY_2')
  E_COPY_2_path = sbox.ospath('A/B_COPY_2/E')
  alpha_path = sbox.ospath('A/B/E/alpha')
  beta_path = sbox.ospath('A/B/E/beta')

  # Make branches A/B_COPY_1 and A/B_COPY_2
  expected_stdout = verify.UnorderedOutput([
     "A " + B_COPY_1_path + "\n",
     "A " + sbox.ospath('A/B_COPY_1/lambda') + "\n",
     "A " + sbox.ospath('A/B_COPY_1/E') + "\n",
     "A " + sbox.ospath('A/B_COPY_1/E/alpha') + "\n",
     "A " + sbox.ospath('A/B_COPY_1/E/beta') + "\n",
     "A " + sbox.ospath('A/B_COPY_1/F') + "\n",
    ])
  svntest.actions.run_and_verify_svn(expected_stdout, [], 'copy',
                                     sbox.repo_url + "/A/B", B_COPY_1_path)

  expected_stdout = verify.UnorderedOutput([
     "A " + B_COPY_2_path + "\n",
     "A " + sbox.ospath('A/B_COPY_2/lambda') + "\n",
     "A " + sbox.ospath('A/B_COPY_2/E') + "\n",
     "A " + sbox.ospath('A/B_COPY_2/E/alpha') + "\n",
     "A " + sbox.ospath('A/B_COPY_2/E/beta') + "\n",
     "A " + sbox.ospath('A/B_COPY_2/F') + "\n",
    ])
  svntest.actions.run_and_verify_svn(expected_stdout, [], 'copy',
                                     sbox.repo_url + "/A/B", B_COPY_2_path)

  # r2 - Commit the two new branches.
  expected_output = svntest.wc.State(wc_dir, {
    'A/B_COPY_1' : Item(verb='Adding'),
    'A/B_COPY_2' : Item(verb='Adding')
    })
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.add({
    "A/B_COPY_1" : Item(status=' ', wc_rev=2),
    "A/B_COPY_1/lambda" : Item(status=' ', wc_rev=2),
    "A/B_COPY_1/E" : Item(status=' ', wc_rev=2),
    "A/B_COPY_1/E/alpha" : Item(status=' ', wc_rev=2),
    "A/B_COPY_1/E/beta" : Item(status=' ', wc_rev=2),
    "A/B_COPY_1/F" : Item(status=' ', wc_rev=2),
    "A/B_COPY_2" : Item(status=' ', wc_rev=2),
    "A/B_COPY_2/lambda" : Item(status=' ', wc_rev=2),
    "A/B_COPY_2/E" : Item(status=' ', wc_rev=2),
    "A/B_COPY_2/E/alpha" : Item(status=' ', wc_rev=2),
    "A/B_COPY_2/E/beta" : Item(status=' ', wc_rev=2),
    "A/B_COPY_2/F" : Item(status=' ', wc_rev=2),
    })
  svntest.actions.run_and_verify_commit(wc_dir,
                                        expected_output,
                                        expected_status)

  # Make some changes under A/B

  # r3 - modify and commit A/B/E/beta
  svntest.main.file_write(beta_path, "New content")
  expected_output = svntest.wc.State(wc_dir,
                                     {'A/B/E/beta' : Item(verb='Sending')})
  expected_status.tweak('A/B/E/beta', wc_rev=3)
  svntest.actions.run_and_verify_commit(wc_dir, expected_output,
                                        expected_status)

  # r4 - modify and commit A/B/E/alpha
  svntest.main.file_write(alpha_path, "New content")
  expected_output = svntest.wc.State(wc_dir,
                                     {'A/B/E/alpha' : Item(verb='Sending')})
  expected_status.tweak('A/B/E/alpha', wc_rev=4)
  svntest.actions.run_and_verify_commit(wc_dir, expected_output,
                                        expected_status)

  # Merge r2:4 into A/B_COPY_1 (mergeinfo lands on the branch *root*).
  expected_output = svntest.wc.State(B_COPY_1_path, {
    'E/alpha' : Item(status='U '),
    'E/beta' : Item(status='U '),
    })
  expected_mergeinfo_output = svntest.wc.State(B_COPY_1_path, {
    '' : Item(status=' U'),
    })
  expected_elision_output = svntest.wc.State(B_COPY_1_path, {
    })
  expected_merge_status = svntest.wc.State(B_COPY_1_path, {
    '' : Item(status=' M', wc_rev=2),
    'lambda' : Item(status=' ', wc_rev=2),
    'E' : Item(status=' ', wc_rev=2),
    'E/alpha' : Item(status='M ', wc_rev=2),
    'E/beta' : Item(status='M ', wc_rev=2),
    'F' : Item(status=' ', wc_rev=2),
    })
  expected_merge_disk = svntest.wc.State('', {
    '' : Item(props={SVN_PROP_MERGEINFO : '/A/B:3-4'}),
    'lambda' : Item("This is the file 'lambda'.\n"),
    'E' : Item(),
    'E/alpha' : Item("New content"),
    'E/beta' : Item("New content"),
    'F' : Item(),
    })
  expected_skip = svntest.wc.State(B_COPY_1_path, { })
  svntest.actions.run_and_verify_merge(B_COPY_1_path, '2', '4',
                                       sbox.repo_url + '/A/B', None,
                                       expected_output,
                                       expected_mergeinfo_output,
                                       expected_elision_output,
                                       expected_merge_disk,
                                       expected_merge_status,
                                       expected_skip,
                                       check_props=True)

  # r5 - Commit the merge into A/B_COPY_1/E
  expected_output = svntest.wc.State(
    wc_dir,
    {'A/B_COPY_1' : Item(verb='Sending'),
     'A/B_COPY_1/E/alpha' : Item(verb='Sending'),
     'A/B_COPY_1/E/beta' : Item(verb='Sending'),
     })
  expected_status.tweak('A/B_COPY_1', wc_rev=5)
  expected_status.tweak('A/B_COPY_1/E/alpha', wc_rev=5)
  expected_status.tweak('A/B_COPY_1/E/beta', wc_rev=5)
  expected_status.tweak('A/B_COPY_1/lambda', wc_rev=2)
  svntest.actions.run_and_verify_commit(wc_dir, expected_output,
                                        expected_status)

  # Merge r2:4 into A/B_COPY_2/E (mergeinfo lands on the *subtree*,
  # not committed -- it stays a local modification).
  expected_output = svntest.wc.State(E_COPY_2_path, {
    'alpha' : Item(status='U '),
    'beta' : Item(status='U '),
    })
  expected_mergeinfo_output = svntest.wc.State(E_COPY_2_path, {
    '' : Item(status=' U'),
    })
  expected_elision_output = svntest.wc.State(E_COPY_2_path, {
    })
  expected_merge_status = svntest.wc.State(E_COPY_2_path, {
    '' : Item(status=' M', wc_rev=2),
    'alpha' : Item(status='M ', wc_rev=2),
    'beta' : Item(status='M ', wc_rev=2),
    })
  expected_merge_disk = svntest.wc.State('', {
    '' : Item(props={SVN_PROP_MERGEINFO : '/A/B/E:3-4'}),
    'alpha' : Item("New content"),
    'beta' : Item("New content"),
    })
  expected_skip = svntest.wc.State(E_COPY_2_path, { })
  svntest.actions.run_and_verify_merge(E_COPY_2_path, '2', '4',
                                       sbox.repo_url + '/A/B/E', None,
                                       expected_output,
                                       expected_mergeinfo_output,
                                       expected_elision_output,
                                       expected_merge_disk,
                                       expected_merge_status,
                                       expected_skip,
                                       check_props=True)

  # Switch A/B_COPY_2 to URL of A/B_COPY_1. The local mergeinfo for r1,3-4
  # on A/B_COPY_2/E is identical to the mergeinfo added to A/B_COPY_2 as a
  # result of the switch, but we leave the former in place.

  # Setup expected results of switch.
  expected_output = svntest.wc.State(sbox.wc_dir, {
    "A/B_COPY_2" : Item(status=' U'),
    "A/B_COPY_2/E/alpha" : Item(status='G '),
    "A/B_COPY_2/E/beta" : Item(status='G '),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_disk.tweak("A/B/E/alpha", contents="New content")
  expected_disk.tweak("A/B/E/beta", contents="New content")
  expected_disk.add({
    "A/B_COPY_1" : Item(props={SVN_PROP_MERGEINFO : '/A/B:3-4'}),
    "A/B_COPY_1/E" : Item(),
    "A/B_COPY_1/F" : Item(),
    "A/B_COPY_1/lambda" : Item("This is the file 'lambda'.\n"),
    "A/B_COPY_1/E/alpha" : Item("New content"),
    "A/B_COPY_1/E/beta" : Item("New content"),
    "A/B_COPY_2" : Item(props={SVN_PROP_MERGEINFO : '/A/B:3-4'}),
    "A/B_COPY_2/E" : Item(props={SVN_PROP_MERGEINFO : '/A/B/E:3-4'}),
    "A/B_COPY_2/F" : Item(),
    "A/B_COPY_2/lambda" : Item("This is the file 'lambda'.\n"),
    "A/B_COPY_2/E/alpha" : Item("New content"),
    "A/B_COPY_2/E/beta" : Item("New content"),
    })
  expected_status = svntest.actions.get_virginal_state(sbox.wc_dir, 1)
  expected_status.tweak("A/B/E/beta", wc_rev=3)
  expected_status.tweak("A/B/E/alpha", wc_rev=4)
  expected_status.add({
    "A/B_COPY_1" : Item(status=' ', wc_rev=5),
    "A/B_COPY_1/E" : Item(status=' ', wc_rev=2),
    "A/B_COPY_1/F" : Item(status=' ', wc_rev=2),
    "A/B_COPY_1/lambda" : Item(status=' ', wc_rev=2),
    "A/B_COPY_1/E/alpha" : Item(status=' ', wc_rev=5),
    "A/B_COPY_1/E/beta" : Item(status=' ', wc_rev=5),
    "A/B_COPY_2" : Item(status=' ', wc_rev=5, switched='S'),
    "A/B_COPY_2/E" : Item(status=' M', wc_rev=5),
    "A/B_COPY_2/F" : Item(status=' ', wc_rev=5),
    "A/B_COPY_2/lambda" : Item(status=' ', wc_rev=5),
    "A/B_COPY_2/E/alpha" : Item(status=' ', wc_rev=5),
    "A/B_COPY_2/E/beta" : Item(status=' ', wc_rev=5),
    })
  svntest.actions.run_and_verify_switch(sbox.wc_dir,
                                        B_COPY_2_path,
                                        sbox.repo_url + "/A/B_COPY_1",
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], True,
                                        '--ignore-ancestry')

  # Now check a switch which reverses and earlier switch and leaves
  # a path in an unswitched state.
  #
  # Switch A/B_COPY_1/lambda to iota. Use propset to give A/B_COPY/lambda
  # the mergeinfo '/A/B/lambda:1,3-4'. Then switch A/B_COPY_1/lambda back
  # to A/B_COPY_1/lambda. The local mergeinfo for r1,3-4 should remain on
  # A/B_COPY_1/lambda.
  expected_output = svntest.wc.State(sbox.wc_dir, {
    "A/B_COPY_1/lambda" : Item(status='U '),
    })
  expected_disk.tweak("A/B_COPY_1/lambda",
                      contents="This is the file 'iota'.\n")
  expected_status.tweak("A/B_COPY_1/lambda", wc_rev=5, switched='S')
  svntest.actions.run_and_verify_switch(sbox.wc_dir,
                                        lambda_path,
                                        sbox.repo_url + "/iota",
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], True,
                                        '--ignore-ancestry')

  svntest.actions.run_and_verify_svn(["property '" + SVN_PROP_MERGEINFO +
                                      "' set on '" + lambda_path + "'" +
                                      "\n"], [], 'ps', SVN_PROP_MERGEINFO,
                                     '/A/B/lambda:3-4', lambda_path)

  expected_output = svntest.wc.State(sbox.wc_dir, {
    "A/B_COPY_1/lambda" : Item(status='U '),
    })
  expected_disk.tweak("A/B_COPY_1/lambda",
                      contents="This is the file 'lambda'.\n",
                      props={SVN_PROP_MERGEINFO : '/A/B/lambda:3-4'})
  expected_status.tweak("A/B_COPY_1/lambda", switched=None, status=' M')
  svntest.actions.run_and_verify_switch(sbox.wc_dir,
                                        lambda_path,
                                        sbox.repo_url + "/A/B_COPY_1/lambda",
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], True,
                                        '--ignore-ancestry')
#----------------------------------------------------------------------
def switch_with_depth(sbox):
  """Exercise 'svn switch --depth' with every depth value.

  Switches A/B to A/D and back at depths empty, files, immediates and
  infinity, checking output, disk and status after each operation.
  """

  sbox.build(read_only = True)

  # Form some paths and URLs required
  wc_dir = sbox.wc_dir
  repo_url = sbox.repo_url
  AD_url = repo_url + '/A/D'
  AB_url = repo_url + '/A/B'
  AB_path = sbox.ospath('A/B')

  # Set up expected results of 'switch --depth=empty'
  expected_output = svntest.wc.State(wc_dir, {})
  expected_disk = svntest.main.greek_state.copy()
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  # NOTE(review): depth=empty still marks the children as switched here;
  # this matches the expectations below -- confirm against the intended
  # semantics if this test is modified.
  expected_status.tweak('A/B', switched='S')
  expected_status.tweak('A/B/lambda', switched='S')
  expected_status.tweak('A/B/E', switched='S')
  expected_status.tweak('A/B/F', switched='S')

  # Do 'switch --depth=empty' and check the results in three ways.
  svntest.actions.run_and_verify_switch(wc_dir, AB_path, AD_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], False,
                                        '--depth', 'empty', '--ignore-ancestry')

  # Set up expected results for reverting 'switch --depth=empty'
  expected_output = svntest.wc.State(wc_dir, {})
  expected_disk = svntest.main.greek_state.copy()
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  svntest.actions.run_and_verify_switch(wc_dir, AB_path, AB_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], False,
                                        '--depth', 'empty', '--ignore-ancestry')

  # Set up expected results of 'switch --depth=files'
  expected_output = svntest.wc.State(wc_dir, {
    'A/B/lambda' : Item(status='D '),
    'A/B/gamma' : Item(status='A '),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_disk.remove('A/B/lambda')
  expected_disk.add({
    'A/B/gamma' : Item("This is the file 'gamma'.\n")
    })
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.remove('A/B/lambda')
  expected_status.add({
    'A/B/gamma' : Item(status=' ', wc_rev=1)
    })
  expected_status.tweak('A/B', switched='S')
  expected_status.tweak('A/B/E', switched='S')
  expected_status.tweak('A/B/F', switched='S')

  # Do 'switch --depth=files' and check the results in three ways.
  svntest.actions.run_and_verify_switch(wc_dir, AB_path, AD_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], False,
                                        '--depth', 'files', '--ignore-ancestry')

  # Set up expected results for reverting 'switch --depth=files'
  expected_output = svntest.wc.State(wc_dir, {
    'A/B/gamma' : Item(status='D '),
    'A/B/lambda' : Item(status='A '),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  svntest.actions.run_and_verify_switch(wc_dir, AB_path, AB_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], False,
                                        '--depth', 'files', '--ignore-ancestry')

  # Putting the depth=immediates stuff in a subroutine, because we're
  # going to run it twice (once now, once again after switching back).
  def sw_depth_imm():
    # Do 'switch --depth=immediates' from A/B to A/D and verify.
    expected_output = svntest.wc.State(wc_dir, {
      'A/B/lambda' : Item(status='D '),
      'A/B/E' : Item(status='D '),
      'A/B/F' : Item(status='D '),
      'A/B/gamma' : Item(status='A '),
      'A/B/G' : Item(status='A '),
      'A/B/H' : Item(status='A '),
      })
    expected_disk = svntest.main.greek_state.copy()
    expected_disk.remove('A/B/lambda', 'A/B/E/beta', 'A/B/E/alpha',
                         'A/B/E', 'A/B/F')
    expected_disk.add({
      'A/B/gamma' : Item("This is the file 'gamma'.\n"),
      'A/B/G' : Item(),
      'A/B/H' : Item(),
      })
    expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
    expected_status.remove('A/B/lambda', 'A/B/E/beta', 'A/B/E/alpha',
                           'A/B/E', 'A/B/F')
    expected_status.add({
      'A/B/gamma' : Item(status=' ', wc_rev=1),
      'A/B/G' : Item(status=' ', wc_rev=1),
      'A/B/H' : Item(status=' ', wc_rev=1)
      })
    expected_status.tweak('A/B', switched='S')

    svntest.actions.run_and_verify_switch(wc_dir, AB_path, AD_url,
                                          expected_output,
                                          expected_disk,
                                          expected_status,
                                          [], False,
                                          '--depth', 'immediates',
                                          '--ignore-ancestry')

  sw_depth_imm()

  # Revert the depth=immediates switch with a full-depth switch back.
  expected_output = svntest.wc.State(wc_dir, {
    'A/B/gamma' : Item(status='D '),
    'A/B/G' : Item(status='D '),
    'A/B/H' : Item(status='D '),
    'A/B/lambda' : Item(status='A '),
    'A/B/E' : Item(status='A '),
    'A/B/E/alpha' : Item(status='A '),
    'A/B/E/beta' : Item(status='A '),
    'A/B/F' : Item(status='A '),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  svntest.actions.run_and_verify_switch(wc_dir, AB_path, AB_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], False,
                                        '--ignore-ancestry')

  # Switch to depth=immediates once more, to set up for the
  # 'switch --depth=infinity', to test going all the way.
  sw_depth_imm()

  # Set up expected results of 'switch --depth=infinity'
  expected_output = svntest.wc.State(wc_dir, {
    'A/B/gamma' : Item(status='D '),
    'A/B/G' : Item(status='D '),
    'A/B/H' : Item(status='D '),
    'A/B/lambda' : Item(status='A '),
    'A/B/E' : Item(status='A '),
    'A/B/E/alpha' : Item(status='A '),
    'A/B/E/beta' : Item(status='A '),
    'A/B/F' : Item(status='A '),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)

  # Do the 'switch --depth=infinity' and check the results in three ways.
  svntest.actions.run_and_verify_switch(wc_dir, AB_path, AB_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], False,
                                        '--depth', 'infinity',
                                        '--ignore-ancestry')
#----------------------------------------------------------------------
def switch_to_dir_with_peg_rev(sbox):
  """Switch a directory to an old revision of a since-deleted directory
  using peg-revision syntax (URL@PEG with -r OPERATIVE)."""

  sbox.build()
  wc_dir = sbox.wc_dir
  repo_url = sbox.repo_url

  # prepare two dirs X and Y in rev. 2
  X_path = sbox.ospath('X')
  Y_path = sbox.ospath('Y')
  svntest.main.run_svn(None, 'mkdir', X_path, Y_path)
  sbox.simple_commit(message='log message')

  # change tau in rev. 3
  ADG_path = sbox.ospath('A/D/G')
  tau_path = os.path.join(ADG_path, 'tau')
  svntest.main.file_append(tau_path, "new line\n")
  sbox.simple_commit(message='log message')

  # delete A/D/G in rev. 4
  svntest.main.run_svn(None, 'up', wc_dir)
  svntest.main.run_svn(None, 'rm', ADG_path)
  sbox.simple_commit(message='log message')

  # Test 1: switch X to A/D/G@2
  ADG_url = repo_url + '/A/D/G'
  expected_output = svntest.wc.State(wc_dir, {
    'X/pi' : Item(status='A '),
    'X/rho' : Item(status='A '),
    'X/tau' : Item(status='A '),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_disk.add({
    'X' : Item(),
    'X/pi' : Item("This is the file 'pi'.\n"),
    'X/rho' : Item("This is the file 'rho'.\n"),
    # X/tau is the rev-2 content, i.e. without the "new line" from r3.
    'X/tau' : Item("This is the file 'tau'.\n"),
    'Y' : Item(),
    })
  expected_disk.remove('A/D/G', 'A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau')
  expected_status = svntest.actions.get_virginal_state(wc_dir, 3)
  expected_status.remove('A/D/G', 'A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau')
  expected_status.add({
    'X' : Item(status=' ', wc_rev=2, switched='S'),
    'X/pi' : Item(status=' ', wc_rev=2),
    'X/rho' : Item(status=' ', wc_rev=2),
    'X/tau' : Item(status=' ', wc_rev=2),
    'Y' : Item(status=' ', wc_rev=3)
    })

  # Do the switch to rev. 2 of /A/D/G@3.
  svntest.actions.run_and_verify_switch(wc_dir, X_path, ADG_url + '@3',
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [], False,
                                        '-r', '2', '--ignore-ancestry')
def switch_urls_with_spaces(sbox):
  """Switch a directory and a file whose names (and hence URLs)
  contain spaces."""

  sbox.build()
  wc_dir = sbox.wc_dir
  repo_url = sbox.repo_url

  # add file and directory with spaces in their names.
  XYZ_path = sbox.ospath('X Y Z')
  ABC_path = sbox.ospath('A B C')
  svntest.main.run_svn(None, 'mkdir', XYZ_path, ABC_path)

  tpm_path = sbox.ospath('tau pau mau')
  bbb_path = sbox.ospath('bar baz bal')
  svntest.main.file_write(tpm_path, "This is the file 'tau pau mau'.\n")
  svntest.main.file_write(bbb_path, "This is the file 'bar baz bal'.\n")
  svntest.main.run_svn(None, 'add', tpm_path, bbb_path)

  sbox.simple_commit(message='log message')

  # Test 1: switch directory 'A B C' to url 'X Y Z'
  XYZ_url = repo_url + '/X Y Z'
  expected_output = svntest.wc.State(wc_dir, {
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_disk.add({
    'X Y Z' : Item(),
    'A B C' : Item(),
    'tau pau mau' : Item("This is the file 'tau pau mau'.\n"),
    'bar baz bal' : Item("This is the file 'bar baz bal'.\n"),
    })
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.add({
    'X Y Z' : Item(status=' ', wc_rev=2),
    'A B C' : Item(status=' ', wc_rev=2, switched='S'),
    'tau pau mau' : Item(status=' ', wc_rev=2),
    'bar baz bal' : Item(status=' ', wc_rev=2),
    })
  svntest.actions.run_and_verify_switch(wc_dir, ABC_path, XYZ_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [],
                                        False, '--ignore-ancestry')

  # Test 2: switch file 'bar baz bal' to 'tau pau mau'
  tpm_url = repo_url + '/tau pau mau'
  expected_output = svntest.wc.State(wc_dir, {
    'bar baz bal' : Item(status='U '),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_disk.add({
    'X Y Z' : Item(),
    'A B C' : Item(),
    'tau pau mau' : Item("This is the file 'tau pau mau'.\n"),
    # After the switch, 'bar baz bal' carries tau pau mau's content.
    'bar baz bal' : Item("This is the file 'tau pau mau'.\n"),
    })
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.add({
    'X Y Z' : Item(status=' ', wc_rev=2),
    'A B C' : Item(status=' ', wc_rev=2, switched='S'),
    'tau pau mau' : Item(status=' ', wc_rev=2),
    'bar baz bal' : Item(status=' ', wc_rev=2, switched='S'),
    })
  svntest.actions.run_and_verify_switch(wc_dir, bbb_path, tpm_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [],
                                        False, '--ignore-ancestry')
def switch_to_dir_with_peg_rev2(sbox):
  """Switch to an old revision of a renamed directory, pegging the
  URL at HEAD (URL@HEAD with -r 2)."""

  sbox.build()
  wc_dir = sbox.wc_dir
  repo_url = sbox.repo_url

  # prepare dir X in rev. 2
  X_path = sbox.ospath('X')
  svntest.main.run_svn(None, 'mkdir', X_path)
  sbox.simple_commit(message='log message')

  # make a change in ADG in rev. 3
  tau_path = sbox.ospath('A/D/G/tau')
  svntest.main.file_append(tau_path, "extra line\n")
  sbox.simple_commit(message='log message')

  # Rename ADG to ADY in rev 4
  svntest.main.run_svn(None, 'up', wc_dir)
  ADG_path = sbox.ospath('A/D/G')
  ADY_path = sbox.ospath('A/D/Y')
  svntest.main.run_svn(None, 'mv', ADG_path, ADY_path)
  sbox.simple_commit(message='log message')

  # Test switch X to rev 2 of A/D/Y@HEAD
  ADY_url = sbox.repo_url + '/A/D/Y'
  expected_output = svntest.wc.State(wc_dir, {
    'X/pi' : Item(status='A '),
    'X/rho' : Item(status='A '),
    'X/tau' : Item(status='A '),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_disk.add({
    'X' : Item(),
    'X/pi' : Item("This is the file 'pi'.\n"),
    'X/rho' : Item("This is the file 'rho'.\n"),
    # X/tau at rev 2 predates the "extra line" committed in r3.
    'X/tau' : Item("This is the file 'tau'.\n"),
    'A/D/Y' : Item(),
    'A/D/Y/pi' : Item("This is the file 'pi'.\n"),
    'A/D/Y/rho' : Item("This is the file 'rho'.\n"),
    'A/D/Y/tau' : Item("This is the file 'tau'.\nextra line\n"),
    })
  expected_disk.remove('A/D/G', 'A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau')
  expected_status = svntest.actions.get_virginal_state(wc_dir, 3)
  expected_status.remove('A/D/G', 'A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau')
  expected_status.add({
    'X' : Item(status=' ', wc_rev=2, switched='S'),
    'X/pi' : Item(status=' ', wc_rev=2),
    'X/rho' : Item(status=' ', wc_rev=2),
    'X/tau' : Item(status=' ', wc_rev=2),
    'A/D/Y' : Item(status=' ', wc_rev=4),
    'A/D/Y/pi' : Item(status=' ', wc_rev=4),
    'A/D/Y/rho' : Item(status=' ', wc_rev=4),
    'A/D/Y/tau' : Item(status=' ', wc_rev=4),
    })

  svntest.actions.run_and_verify_switch(wc_dir, X_path, ADY_url + '@HEAD',
                                        expected_output,
                                        expected_disk,
                                        expected_status, [], False,
                                        '-r', '2', '--ignore-ancestry')
def switch_to_root(sbox):
  """Switch a subdirectory (A/D/G) to the repository root URL, so the
  whole greek tree appears nested under it."""

  sbox.build(read_only = True)
  wc_dir = sbox.wc_dir
  repo_url = sbox.repo_url

  ADG_path = sbox.ospath('A/D/G')

  # Test switch /A/D/G to /
  # NOTE(review): AD_url is unused below; the switch target is
  # sbox.repo_url (the repository root).
  AD_url = sbox.repo_url + '/A/D'
  expected_output = svntest.wc.State(wc_dir, {
    'A/D/G/pi' : Item(status='D '),
    'A/D/G/rho' : Item(status='D '),
    'A/D/G/tau' : Item(status='D '),
    'A/D/G/A' : Item(status='A '),
    'A/D/G/A/B' : Item(status='A '),
    'A/D/G/A/B/lambda' : Item(status='A '),
    'A/D/G/A/B/E' : Item(status='A '),
    'A/D/G/A/B/E/alpha' : Item(status='A '),
    'A/D/G/A/B/E/beta' : Item(status='A '),
    'A/D/G/A/B/F' : Item(status='A '),
    'A/D/G/A/mu' : Item(status='A '),
    'A/D/G/A/C' : Item(status='A '),
    'A/D/G/A/D' : Item(status='A '),
    'A/D/G/A/D/gamma' : Item(status='A '),
    'A/D/G/A/D/G' : Item(status='A '),
    'A/D/G/A/D/G/pi' : Item(status='A '),
    'A/D/G/A/D/G/rho' : Item(status='A '),
    'A/D/G/A/D/G/tau' : Item(status='A '),
    'A/D/G/A/D/H' : Item(status='A '),
    'A/D/G/A/D/H/chi' : Item(status='A '),
    'A/D/G/A/D/H/omega' : Item(status='A '),
    'A/D/G/A/D/H/psi' : Item(status='A '),
    'A/D/G/iota' : Item(status='A '),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_disk.remove('A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau')
  # The entire greek tree is grafted under A/D/G after the switch.
  expected_disk.add_state('A/D/G', svntest.main.greek_state.copy())

  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.remove('A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau')
  expected_status.add_state('A/D/G',
                            svntest.actions.get_virginal_state(wc_dir + '/A/D/G', 1))
  expected_status.tweak('A/D/G', switched = 'S')

  svntest.actions.run_and_verify_switch(wc_dir, ADG_path, sbox.repo_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [],
                                        False, '--ignore-ancestry')
#----------------------------------------------------------------------
# Make sure that switch continues after deleting locally modified
# directories, as update and merge do.
@Issue(2505)
def tolerate_local_mods(sbox):
  """Switch must tolerate (tree-conflict, not abort on) locally added
  directories containing local modifications."""

  sbox.build()
  wc_dir = sbox.wc_dir

  A_path = sbox.ospath('A')
  L_path = os.path.join(A_path, 'L')
  LM_path = os.path.join(L_path, 'local_mod')
  A_url = sbox.repo_url + '/A'
  A2_url = sbox.repo_url + '/A2'

  # r2: branch A to A2 directly in the repository.
  svntest.actions.run_and_verify_svn(['Committing transaction...\n',
                                      'Committed revision 2.\n'], [],
                                     'cp', '-m', 'make copy', A_url, A2_url)

  # r3: add directory A/L.
  os.mkdir(L_path)
  svntest.main.run_svn(None, 'add', L_path)
  sbox.simple_commit(message='Commit added folder')

  # locally modified versioned file
  svntest.main.file_write(LM_path, 'Locally modified file.\n', 'w+')
  sbox.simple_add('A/L/local_mod')

  expected_output = svntest.wc.State(wc_dir, {
    'A/L' : Item(status=' ', treeconflict='C'),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_disk.add({
    'A/L' : Item(),
    'A/L/local_mod' : Item(contents='Locally modified file.\n'),
    })
  expected_status = svntest.actions.get_virginal_state(wc_dir, 3)
  expected_status.tweak('', 'iota', wc_rev=1)
  expected_status.tweak('A', switched='S')
  expected_status.add({
    'A/L' : Item(status='A ', copied='+', treeconflict='C', wc_rev='-'),
    'A/L/local_mod' : Item(status='A ', wc_rev='-'),
    })

  # Used to fail with locally modified or unversioned files
  svntest.actions.run_and_verify_switch(wc_dir, A_path, A2_url,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        [],
                                        False, '--ignore-ancestry')
#----------------------------------------------------------------------
# Detect tree conflicts among files and directories,
# edited or deleted in a deep directory structure.
#
# See use cases 1-3 in notes/tree-conflicts/use-cases.txt for background.
# Note that we do not try to track renames. The only difference from
# the behavior of Subversion 1.4 and 1.5 is the conflicted status of the
# parent directory.
# convenience definitions
# Short aliases for the shared deep-trees fixtures used by the
# tree_conflicts_on_switch_* tests below.

# Action callbacks applied to the deep-trees scheme.
leaf_edit = svntest.deeptrees.deep_trees_leaf_edit
tree_del = svntest.deeptrees.deep_trees_tree_del
leaf_del = svntest.deeptrees.deep_trees_leaf_del

# Expected on-disk states after each action.
disk_after_leaf_edit = svntest.deeptrees.deep_trees_after_leaf_edit
disk_after_leaf_del = svntest.deeptrees.deep_trees_after_leaf_del
disk_after_tree_del = svntest.deeptrees.deep_trees_after_tree_del

# Expected output/status templates for conflicting switches.
deep_trees_conflict_output = svntest.deeptrees.deep_trees_conflict_output
deep_trees_conflict_output_skipped = \
    svntest.deeptrees.deep_trees_conflict_output_skipped
deep_trees_status_local_tree_del = \
    svntest.deeptrees.deep_trees_status_local_tree_del
deep_trees_status_local_leaf_edit = \
    svntest.deeptrees.deep_trees_status_local_leaf_edit

DeepTreesTestCase = svntest.deeptrees.DeepTreesTestCase

# Shorthand for path joining in the tests below.
j = os.path.join
def tree_conflicts_on_switch_1_1(sbox):
  """Tree conflicts on switch: local tree delete vs. incoming leaf edit
  (use case 1.1 of notes/tree-conflicts/use-cases.txt)."""

  sbox.build()

  # use case 1, as in notes/tree-conflicts/use-cases.txt
  # 1.1) local tree delete, incoming leaf edit

  expected_output = deep_trees_conflict_output.copy()
  expected_output.add({
    'DDD/D1/D2' : Item(status=' ', treeconflict='U'),
    'DDD/D1/D2/D3' : Item(status=' ', treeconflict='U'),
    'DDD/D1/D2/D3/zeta' : Item(status=' ', treeconflict='A'),
    'DD/D1/D2' : Item(status=' ', treeconflict='U'),
    'DD/D1/D2/epsilon' : Item(status=' ', treeconflict='A'),
    'DF/D1/beta' : Item(status=' ', treeconflict='U'),
    'D/D1/delta' : Item(status=' ', treeconflict='A'),
    'DDF/D1/D2' : Item(status=' ', treeconflict='U'),
    'DDF/D1/D2/gamma' : Item(status=' ', treeconflict='U')
    })

  expected_disk = svntest.wc.State('', {
    'F' : Item(),
    'D' : Item(),
    'DF' : Item(),
    'DD' : Item(),
    'DDF' : Item(),
    'DDD' : Item(),
    })

  # The files delta, epsilon, and zeta are incoming additions, but since
  # they are all within locally deleted trees they should also be scheduled
  # for deletion.
  expected_status = deep_trees_status_local_tree_del.copy()
  expected_status.add({
    'D/D1/delta' : Item(status='D '),
    'DD/D1/D2/epsilon' : Item(status='D '),
    'DDD/D1/D2/D3/zeta' : Item(status='D '),
    })
  expected_status.tweak('', switched='S')

  # Update to the target rev.
  expected_status.tweak(wc_rev=3)

  expected_info = {
    'F/alpha' : {
      'Tree conflict' :
        '^local file delete, incoming file edit upon switch'
        + ' Source left: .file.*/F/alpha@2'
        + ' Source right: .file.*/F/alpha@3$',
    },
    'DF/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir edit upon switch'
        + ' Source left: .dir.*/DF/D1@2'
        + ' Source right: .dir.*/DF/D1@3$',
    },
    'DDF/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir edit upon switch'
        + ' Source left: .dir.*/DDF/D1@2'
        + ' Source right: .dir.*/DDF/D1@3$',
    },
    'D/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir edit upon switch'
        + ' Source left: .dir.*/D/D1@2'
        + ' Source right: .dir.*/D/D1@3$',
    },
    'DD/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir edit upon switch'
        + ' Source left: .dir.*/DD/D1@2'
        + ' Source right: .dir.*/DD/D1@3$',
    },
    'DDD/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir edit upon switch'
        + ' Source left: .dir.*/DDD/D1@2'
        + ' Source right: .dir.*/DDD/D1@3$',
    },
  }

  svntest.deeptrees.deep_trees_run_tests_scheme_for_switch(sbox,
    [ DeepTreesTestCase("local_tree_del_incoming_leaf_edit",
                        tree_del,
                        leaf_edit,
                        expected_output,
                        expected_disk,
                        expected_status,
                        expected_info = expected_info) ] )
@Issue(3334)
def tree_conflicts_on_switch_1_2(sbox):
  """Tree conflicts on switch: local tree delete vs. incoming leaf delete
  (use case 1.2 of notes/tree-conflicts/use-cases.txt)."""

  sbox.build()

  # 1.2) local tree delete, incoming leaf delete

  expected_output = deep_trees_conflict_output.copy()
  expected_output.add({
    'DD/D1/D2' : Item(status=' ', treeconflict='D'),
    'DDF/D1/D2' : Item(status=' ', treeconflict='U'),
    'DDF/D1/D2/gamma' : Item(status=' ', treeconflict='D'),
    'DDD/D1/D2' : Item(status=' ', treeconflict='U'),
    'DDD/D1/D2/D3' : Item(status=' ', treeconflict='D'),
    'DF/D1/beta' : Item(status=' ', treeconflict='D'),
    })

  expected_disk = svntest.wc.State('', {
    'F' : Item(),
    'D' : Item(),
    'DF' : Item(),
    'DD' : Item(),
    'DDF' : Item(),
    'DDD' : Item(),
    })

  expected_status = deep_trees_status_local_tree_del.copy()

  # Expect the incoming leaf deletes to actually occur. Even though they
  # are within (or in the case of F/alpha and D/D1 are the same as) the
  # trees locally scheduled for deletion we must still delete them and
  # update the scheduled for deletion items to the target rev. Otherwise
  # once the conflicts are resolved we still have a mixed-rev WC we can't
  # commit.
  expected_status.tweak(wc_rev=3)
  expected_status.tweak('F/alpha',
                        'D/D1',
                        status='! ', wc_rev=None)
  expected_status.tweak('', switched='S')
  expected_status.remove('DD/D1/D2',
                         'DDD/D1/D2/D3',
                         'DDF/D1/D2/gamma',
                         'DF/D1/beta')

  expected_info = {
    'F/alpha' : {
      'Tree conflict' :
        '^local file delete, incoming file delete or move upon switch'
        + ' Source left: .file.*/F/alpha@2'
        + ' Source right: .none.*(/F/alpha@3)?$',
    },
    'DF/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir edit upon switch'
        + ' Source left: .dir.*/DF/D1@2'
        + ' Source right: .dir.*/DF/D1@3$',
    },
    'DDF/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir edit upon switch'
        + ' Source left: .dir.*/DDF/D1@2'
        + ' Source right: .dir.*/DDF/D1@3$',
    },
    'D/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir delete or move upon switch'
        + ' Source left: .dir.*/D/D1@2'
        + ' Source right: .none.*(/D/D1@3)?$',
    },
    'DD/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir edit upon switch'
        + ' Source left: .dir.*/DD/D1@2'
        + ' Source right: .dir.*/DD/D1@3$',
    },
    'DDD/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir edit upon switch'
        + ' Source left: .dir.*/DDD/D1@2'
        + ' Source right: .dir.*/DDD/D1@3$',
    },
  }

  svntest.deeptrees.deep_trees_run_tests_scheme_for_switch(sbox,
    [ DeepTreesTestCase("local_tree_del_incoming_leaf_del",
                        tree_del,
                        leaf_del,
                        expected_output,
                        expected_disk,
                        expected_status,
                        expected_info = expected_info) ] )
@Issue(3334)
def tree_conflicts_on_switch_2_1(sbox):
  """Tree conflicts on switch: local leaf delete vs. incoming tree delete.

  Bug fix: the first status tweak below read ``ected_status.tweak(...)``
  (truncated identifier), which raised NameError at runtime; restored to
  ``expected_status.tweak(...)``.

  NOTE(review): this function's name says 2.1 but it runs the
  "local_leaf_del_incoming_tree_del" scenario (2.2) while basing
  expected_status on deep_trees_status_local_leaf_edit, and it previously
  contained a second, dead ``expected_info`` assignment that was
  unconditionally overwritten (the "local file edit" variant, removed
  here as dead code). The 2.1 ("leaf edit") sibling test appears to have
  been lost in a merge -- confirm against upstream switch_tests.py.
  """
  expected_output = deep_trees_conflict_output
  expected_disk = disk_after_leaf_edit.copy()
  expected_status = deep_trees_status_local_leaf_edit.copy()

  # Mark the roots of the locally modified trees as copies.
  # (Fixed: was 'ected_status.tweak', a NameError.)
  expected_status.tweak('D/D1',
                        'F/alpha',
                        'DD/D1',
                        'DF/D1',
                        'DDD/D1',
                        'DDF/D1',
                        status='A ', copied='+', wc_rev='-')
  expected_status.tweak(
    'DD/D1/D2',
    'DDD/D1/D2',
    'DDD/D1/D2/D3',
    'DF/D1/beta',
    'DDF/D1/D2',
    'DDF/D1/D2/gamma',
    copied='+', wc_rev='-')
  expected_status.tweak('', switched='S')

  # The incoming tree deletes turn the copied roots into tree conflicts;
  # the locally deleted leaves stay scheduled for deletion.
  expected_status.tweak('DD/D1', 'DF/D1', 'DDF/D1', 'DDD/D1',
                        status='A ', copied='+', treeconflict='C',
                        wc_rev='-')
  expected_status.tweak('DDF/D1/D2', 'DDD/D1/D2',
                        copied='+', wc_rev='-')
  expected_status.tweak('DD/D1/D2', 'DF/D1/beta', 'DDD/D1/D2/D3',
                        'DDF/D1/D2/gamma',
                        status='D ', copied='+', wc_rev='-')
  expected_status.tweak('F/alpha', 'D/D1',
                        status='! ', treeconflict='C', wc_rev=None)

  # Expected 'svn info' tree-conflict descriptions for each conflicted
  # root: local delete meets incoming delete-or-move.
  expected_info = {
    'F/alpha' : {
      'Tree conflict' :
        '^local file delete, incoming file delete or move upon switch'
        + ' Source left: .file.*/F/alpha@2'
        + ' Source right: .none.*(/F/alpha@3)?$',
    },
    'DF/D1' : {
      'Tree conflict' :
        '^local dir edit, incoming dir delete or move upon switch'
        + ' Source left: .dir.*/DF/D1@2'
        + ' Source right: .none.*(/DF/D1@3)?$',
    },
    'DDF/D1' : {
      'Tree conflict' :
        '^local dir edit, incoming dir delete or move upon switch'
        + ' Source left: .dir.*/DDF/D1@2'
        + ' Source right: .none.*(/DDF/D1@3)?$',
    },
    'D/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir delete or move upon switch'
        + ' Source left: .dir.*/D/D1@2'
        + ' Source right: .none.*(/D/D1@3)?$',
    },
    'DD/D1' : {
      'Tree conflict' :
        '^local dir edit, incoming dir delete or move upon switch'
        + ' Source left: .dir.*/DD/D1@2'
        + ' Source right: .none.*(/DD/D1@3)?$',
    },
    'DDD/D1' : {
      'Tree conflict' :
        '^local dir edit, incoming dir delete or move upon switch'
        + ' Source left: .dir.*/DDD/D1@2'
        + ' Source right: .none.*(/DDD/D1@3)?$',
    },
  }

  svntest.deeptrees.deep_trees_run_tests_scheme_for_switch(sbox,
    [ DeepTreesTestCase("local_leaf_del_incoming_tree_del",
                        leaf_del,
                        tree_del,
                        expected_output,
                        expected_disk,
                        expected_status,
                        expected_info = expected_info) ] )
def tree_conflicts_on_switch_3(sbox):
  """Tree conflicts on switch 3: local tree delete vs. incoming tree delete.

  The working copy schedules whole subtrees for deletion while the switch
  target has also deleted them.  Every deletion root must be reported as a
  tree conflict ('! ' status plus 'C' marker) and the children of the
  deleted directories must disappear from status entirely.
  """
  expected_output = deep_trees_conflict_output
  # Only the six (now empty) container directories survive on disk.
  expected_disk = svntest.wc.State('', {
    'F' : Item(),
    'D' : Item(),
    'DF' : Item(),
    'DD' : Item(),
    'DDF' : Item(),
    'DDD' : Item(),
  })
  expected_status = deep_trees_status_local_tree_del.copy()
  expected_status.tweak('', switched='S')
  # Each deletion root is missing on disk and flagged as a tree conflict.
  expected_status.tweak('F/alpha',
                        'D/D1',
                        'DD/D1',
                        'DF/D1',
                        'DDD/D1',
                        'DDF/D1',
                        status='! ', wc_rev=None)
  # Children of the deleted subtrees drop out of the status tree.
  expected_status.remove('DD/D1/D2',
                         'DF/D1/beta',
                         'DDD/D1/D2',
                         'DDD/D1/D2/D3',
                         'DDF/D1/D2',
                         'DDF/D1/D2/gamma',)
  # 'svn info' must describe each victim as delete-vs-delete upon switch:
  # old source at peg rev 2, no source on the right-hand side.
  expected_info = {
    'F/alpha' : {
      'Tree conflict' :
        '^local file delete, incoming file delete or move upon switch'
        + ' Source  left: .file.*/F/alpha@2'
        + ' Source right: .none.*(/F/alpha@3)?$',
    },
    'DF/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir delete or move upon switch'
        + ' Source  left: .dir.*/DF/D1@2'
        + ' Source right: .none.*(/DF/D1@3)?$',
    },
    'DDF/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir delete or move upon switch'
        + ' Source  left: .dir.*/DDF/D1@2'
        + ' Source right: .none.*(/DDF/D1@3)?$',
    },
    'D/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir delete or move upon switch'
        + ' Source  left: .dir.*/D/D1@2'
        + ' Source right: .none.*(/D/D1@3)?$',
    },
    'DD/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir delete or move upon switch'
        + ' Source  left: .dir.*/DD/D1@2'
        + ' Source right: .none.*(/DD/D1@3)?$',
    },
    'DDD/D1' : {
      'Tree conflict' :
        '^local dir delete, incoming dir delete or move upon switch'
        + ' Source  left: .dir.*/DDD/D1@2'
        + ' Source right: .none.*(/DDD/D1@3)?$',
    },
  }
  svntest.deeptrees.deep_trees_run_tests_scheme_for_switch(sbox,
    [ DeepTreesTestCase("local_tree_del_incoming_tree_del",
                        tree_del,
                        tree_del,
                        expected_output,
                        expected_disk,
                        expected_status,
                        expected_info = expected_info) ] )
def copy_with_switched_subdir(sbox):
  """Copy a directory that contains a switched subdirectory.

  Switches A/D/G to A/B/E, copies A/D to R, then verifies the scheduled
  state, a commit of the copy, and finally a fresh checkout plus the same
  switch: the switch must not leak into the copy's history.
  """
  sbox.build()
  wc_dir = sbox.wc_dir
  D = sbox.ospath('A/D')
  G = os.path.join(D, 'G')
  E_url = sbox.repo_url + '/A/B/E'
  R = sbox.ospath('R')
  state = svntest.actions.get_virginal_state(wc_dir, 1)
  svntest.actions.run_and_verify_status(wc_dir, state)
  # Switch A/D/G to A/B/E: pi/rho/tau are replaced by alpha/beta.
  svntest.actions.run_and_verify_svn(None, [], 'switch',
                                     '--ignore-ancestry', E_url, G)
  state.tweak('A/D/G', switched='S')
  state.remove('A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau')
  state.add({
    'A/D/G/alpha' : Item(status='  ', wc_rev=1),
    'A/D/G/beta' : Item(status='  ', wc_rev=1),
  })
  svntest.actions.run_and_verify_status(wc_dir, state)
  # Copy the parent directory; the copy (including the formerly switched
  # subdir) is scheduled as a plain copy-with-history.
  svntest.actions.run_and_verify_svn(None, [], 'cp', D, R)
  state.add({
    'R' : Item(status='A ', copied='+', wc_rev='-'),
    'R/gamma' : Item(status='  ', copied='+', wc_rev='-'),
    'R/G/alpha' : Item(status='  ', copied='+', wc_rev='-'),
    'R/G/beta' : Item(status='  ', copied='+', wc_rev='-'),
    'R/H' : Item(status='  ', copied='+', wc_rev='-'),
    'R/H/chi' : Item(status='  ', copied='+', wc_rev='-'),
    'R/H/omega' : Item(status='  ', copied='+', wc_rev='-'),
    'R/H/psi' : Item(status='  ', copied='+', wc_rev='-'),
    'R/G' : Item(status='A ', copied='+', wc_rev='-'),
  })
  svntest.actions.run_and_verify_status(wc_dir, state)
  sbox.simple_commit(message='Commit added folder')
  # Touch a file inside the copied subtree and commit again (r3).
  svntest.main.run_svn(None, 'up', wc_dir)
  svntest.main.file_append(sbox.ospath('R/G/alpha'), "apple")
  sbox.simple_commit(message='Commit changed file')
  # A fresh checkout plus the same switch must reproduce the final state.
  svntest.main.safe_rmtree(wc_dir, 1)
  svntest.actions.run_and_verify_svn(None, [],
                                     'checkout',
                                     sbox.repo_url, wc_dir)
  svntest.actions.run_and_verify_svn(None, [], 'switch',
                                     '--ignore-ancestry', E_url, G)
  state.tweak(status='  ', copied=None, wc_rev='3', entry_status=None)
  state.tweak('A/D/G', switched='S')
  svntest.actions.run_and_verify_status(wc_dir, state)
@Issue(3871)
def up_to_old_rev_with_subtree_switched_to_root(sbox):
  """Update the WC to an old revision while a subtree is switched to the
  repository root (regression test for issue #3871)."""
  sbox.build()
  wc_dir = sbox.wc_dir
  run_svn = svntest.actions.run_and_verify_svn
  source = sbox.ospath('A')
  branch = sbox.ospath('branch')
  # Starting from a vanilla greek tree: branch A, commit the branch,
  # then switch the branch directory to the root of the repository.
  run_svn(None, [], 'copy', source, branch)
  run_svn(None, [], 'ci', wc_dir, '-m', 'Create a branch')
  run_svn(None, [], 'sw', sbox.repo_url, branch, '--ignore-ancestry')
  # Updating the whole WC back to r1 must succeed despite the switch.
  run_svn(None, [], 'up', '-r1', wc_dir)
def different_node_kind(sbox):
  """Switch a file to a directory and a directory to a file, both ways.

  The nested helpers mutate the shared expected_disk/expected_status
  trees in place, so the four switches at the bottom build on each other.
  """
  sbox.build(read_only = True)
  os.chdir(sbox.wc_dir)
  sbox.wc_dir = ''
  pristine_disk = svntest.main.greek_state
  pristine_status = svntest.actions.get_virginal_state(sbox.wc_dir, 1)
  expected_disk = pristine_disk.copy()
  expected_status = pristine_status.copy()
  def switch_to_dir(sbox, rel_url, rel_path):
    # Switch the file at rel_path to the directory at rel_url, then
    # verify disk, status, 'svn info', and that a dir is now on disk.
    full_url = sbox.repo_url + '/' + rel_url
    full_path = sbox.ospath(rel_path)
    expected_disk.remove(rel_path)
    expected_disk.add({ rel_path : pristine_disk.desc[rel_url] })
    expected_disk.add_state(rel_path, pristine_disk.subtree(rel_url))
    expected_status.tweak(rel_path, switched='S')
    expected_status.add_state(rel_path, pristine_status.subtree(rel_url))
    svntest.actions.run_and_verify_switch(sbox.wc_dir, full_path, full_url,
                                          None, expected_disk, expected_status,
                                          [], False,
                                          '--ignore-ancestry')
    svntest.actions.run_and_verify_svn(None, [], 'info', full_path)
    if not os.path.isdir(full_path):
      raise svntest.Failure
  def switch_to_file(sbox, rel_url, rel_path):
    # Mirror image: switch the directory at rel_path to the file at
    # rel_url, dropping the old subtree from both expectation trees.
    full_url = sbox.repo_url + '/' + rel_url
    full_path = sbox.ospath(rel_path)
    expected_disk.remove_subtree(rel_path)
    expected_disk.add({ rel_path : pristine_disk.desc[rel_url] })
    expected_status.remove_subtree(rel_path)
    expected_status.add({ rel_path : pristine_status.desc[rel_url] })
    expected_status.tweak(rel_path, switched='S')
    svntest.actions.run_and_verify_switch(sbox.wc_dir, full_path, full_url,
                                          None, expected_disk, expected_status,
                                          [], False,
                                          '--ignore-ancestry')
    svntest.actions.run_and_verify_svn(None, [], 'info', full_path)
    if not os.path.isfile(full_path):
      raise svntest.Failure
  # Switch two files to dirs and two dirs to files.
  # 'A/C' is an empty dir; 'A/D/G' is a non-empty dir.
  switch_to_dir(sbox, 'A/C', 'iota')
  switch_to_dir(sbox, 'A/D/G', 'A/D/gamma')
  switch_to_file(sbox, 'iota', 'A/C')
  switch_to_file(sbox, 'A/D/gamma', 'A/D/G')
@Issue(3332, 3333)
def switch_to_spaces(sbox):
  """Switch to a URL whose path contains spaces (issues #3332, #3333)."""
  sbox.build()
  wc_dir = sbox.wc_dir
  repo_url = sbox.repo_url
  # Paths are normalized in the command processing, so %20 is equivalent to ' '
  url_with_space = repo_url + '/A%20with space'
  url_more_spaces = repo_url + '/A with%20more spaces'
  run_svn = svntest.actions.run_and_verify_svn
  run_svn(None, [], 'cp', repo_url + '/A', url_with_space, '-m', '')
  run_svn(None, [], 'mv', url_with_space, url_more_spaces, '-m', '')
  # After copy (r2) and move (r3) only 'A' ends up switched; the root
  # and 'iota' stay at r1.
  expected_status = svntest.actions.get_virginal_state(wc_dir, 3)
  expected_status.tweak('A', switched='S')
  expected_status.tweak('', 'iota', wc_rev=1)
  svntest.actions.run_and_verify_switch(sbox.wc_dir, sbox.ospath('A'),
                                        repo_url + '/A%20with more%20spaces',
                                        None, None, expected_status)
def switch_across_replacement(sbox):
  """Switch a directory across a revision in which one of its child files
  was replaced (deleted and re-added) with identical text."""
  sbox.build()
  os.chdir(sbox.wc_dir)
  sbox.wc_dir = ''
  # Replace A/mu in r2 with a byte-identical file.
  sbox.simple_rm('A/mu')
  sbox.simple_append('A/mu', "This is the file 'mu'.\n", truncate=True)
  sbox.simple_add('A/mu')
  sbox.simple_commit() # r2
  # When a 'switch' of a dir brings in a replacement of a child file with
  # no textual difference and ignoring ancestry, the replacement must
  # still be reported as a delete followed by an add ('D ' then 'A '),
  # exactly as 'update' reports it in both directions below.
  # This test FAILs when using a Subversion 1.0-1.7 svnserve.
  expected_output = svntest.wc.State(sbox.wc_dir, {
    'A/mu' : Item(status='A ', prev_status='D '),
  })
  svntest.actions.run_and_verify_update(sbox.wc_dir,
                                        expected_output, None, None,
                                        [], False,
                                        '-r1')
  svntest.actions.run_and_verify_update(sbox.wc_dir,
                                        expected_output, None, None,
                                        [], False,
                                        '-r2')
  svntest.actions.run_and_verify_switch(sbox.wc_dir, sbox.ospath('A'), '^/A',
                                        expected_output, None, None,
                                        [], False,
                                        '-r1')
@Issue(1975)
def switch_keywords(sbox):
  """Verify keyword re-expansion after switch (issue #1975).

  Files carrying 'svn:keywords URL' must have their $URL$ keyword
  re-expanded to the new location after 'A' is switched to 'A_copy',
  including a locally modified file (psi).
  """
  sbox.build()
  gamma_path = sbox.ospath('A/D/gamma')
  psi_path = sbox.ospath('A/D/H/psi')
  # Give two files an expandable $URL$ keyword and commit (r2).
  sbox.simple_propset('svn:keywords', 'URL', 'A/D/gamma')
  svntest.main.file_write(gamma_path, "$URL$\n")
  sbox.simple_propset('svn:keywords', 'URL', 'A/D/H/psi')
  svntest.main.file_write(psi_path, "$URL$\n")
  sbox.simple_commit()
  expected_disk = svntest.main.greek_state.copy()
  expected_disk.tweak('A/D/gamma',
                      contents="$URL: %s/A/D/gamma $\n" % sbox.repo_url)
  expected_disk.tweak('A/D/H/psi',
                      contents="$URL: %s/A/D/H/psi $\n" % sbox.repo_url)
  svntest.actions.run_and_verify_update(sbox.wc_dir,
                                        None, expected_disk, None)
  # Branch A to A_copy (r3) and bring the whole WC up to r3.
  sbox.simple_copy('A', 'A_copy')
  sbox.simple_commit()
  sbox.simple_update()
  # Next, we're going to switch A to A_copy, and expect keywords
  # in the switched files to expand to the A_copy URLs.
  expected_disk.add({
    'A_copy/D/H/chi' : Item(contents="This is the file 'chi'.\n"),
    'A_copy/D/H/psi' : Item(contents="$URL: %s/A_copy/D/H/psi $\n"
                            % sbox.repo_url),
    'A_copy/D/H/omega' : Item(contents="This is the file 'omega'.\n"),
    'A_copy/D/G/pi' : Item(contents="This is the file 'pi'.\n"),
    'A_copy/D/G/tau' : Item(contents="This is the file 'tau'.\n"),
    'A_copy/D/G/rho' : Item(contents="This is the file 'rho'.\n"),
    'A_copy/D/gamma' : Item(contents="$URL: %s/A_copy/D/gamma $\n"
                            % sbox.repo_url),
    'A_copy/B/F' : Item(),
    'A_copy/B/E/alpha' : Item(contents="This is the file 'alpha'.\n"),
    'A_copy/B/E/beta' : Item(contents="This is the file 'beta'.\n"),
    'A_copy/B/lambda' : Item(contents="This is the file 'lambda'.\n"),
    'A_copy/mu' : Item(contents="This is the file 'mu'.\n"),
    'A_copy/C' : Item(),
  })
  # After the switch, gamma's keyword must point at A_copy ...
  expected_disk.tweak('A/D/gamma',
                      contents="$URL: %s/A_copy/D/gamma $\n" % sbox.repo_url)
  # ... and so must psi's, with the local modification preserved.
  svntest.main.file_write(psi_path, "$URL$\nnew line\n")
  expected_disk.tweak('A/D/H/psi',
                      contents="$URL: %s/A_copy/D/H/psi $\nnew line\n"
                      % sbox.repo_url)
  expected_status = svntest.actions.get_virginal_state(sbox.wc_dir, 3)
  expected_status.add({
    'A_copy' : Item(status='  ', wc_rev='3'),
    'A_copy/mu' : Item(status='  ', wc_rev='3'),
    'A_copy/D' : Item(status='  ', wc_rev='3'),
    'A_copy/D/H' : Item(status='  ', wc_rev='3'),
    'A_copy/D/H/psi' : Item(status='  ', wc_rev='3'),
    'A_copy/D/H/chi' : Item(status='  ', wc_rev='3'),
    'A_copy/D/H/omega' : Item(status='  ', wc_rev='3'),
    'A_copy/D/gamma' : Item(status='  ', wc_rev='3'),
    'A_copy/D/G' : Item(status='  ', wc_rev='3'),
    'A_copy/D/G/rho' : Item(status='  ', wc_rev='3'),
    'A_copy/D/G/tau' : Item(status='  ', wc_rev='3'),
    'A_copy/D/G/pi' : Item(status='  ', wc_rev='3'),
    'A_copy/B' : Item(status='  ', wc_rev='3'),
    'A_copy/B/E' : Item(status='  ', wc_rev='3'),
    'A_copy/B/E/alpha' : Item(status='  ', wc_rev='3'),
    'A_copy/B/E/beta' : Item(status='  ', wc_rev='3'),
    'A_copy/B/F' : Item(status='  ', wc_rev='3'),
    'A_copy/B/lambda' : Item(status='  ', wc_rev='3'),
    'A_copy/C' : Item(status='  ', wc_rev='3'),
  })
  expected_status.tweak('A', switched='S')
  expected_status.tweak('A/D/H/psi', status='M ')
  svntest.actions.run_and_verify_switch(sbox.wc_dir, sbox.ospath('A'), '^/A_copy',
                                        None, expected_disk, expected_status)
@Issue(4524)
def switch_moves(sbox):
  """Switch onto a branch made from a WC with local moves (issue #4524)."""
  sbox.build()
  # Locally move A/B to B and delete the rest of A, then branch that
  # mixed state directly from the WC (creates /branch in r2).
  sbox.simple_move('A/B', 'B')
  sbox.simple_rm('A')
  branch_url = sbox.repo_url + '/branch'
  svntest.actions.run_and_verify_svn(None, [],
                                     'cp', sbox.wc_dir, branch_url,
                                     '-m', '')
  expected_disk = svntest.wc.State('', {
    'B/E/alpha' : Item(contents="This is the file 'alpha'.\n"),
    'B/E/beta' : Item(contents="This is the file 'beta'.\n"),
    'B/lambda' : Item(contents="This is the file 'lambda'.\n"),
    'B/F' : Item(),
    'iota' : Item(contents="This is the file 'iota'.\n"),
  })
  # Switching the WC root to the branch turns both local moves into
  # tree conflicts: B is a replaced copy, A is missing.
  expected_status = svntest.wc.State(sbox.wc_dir, {
    '' : Item(status='  ', wc_rev='2'),
    'B' : Item(status='R ', copied='+', treeconflict='C', wc_rev='-'),
    'B/lambda' : Item(status='  ', copied='+', wc_rev='-'),
    'B/F' : Item(status='  ', copied='+', wc_rev='-'),
    'B/E' : Item(status='  ', copied='+', wc_rev='-'),
    'B/E/beta' : Item(status='  ', copied='+', wc_rev='-'),
    'B/E/alpha' : Item(status='  ', copied='+', wc_rev='-'),
    'A' : Item(status='! ', treeconflict='C'),
    'iota' : Item(status='  ', wc_rev='2'),
  })
  svntest.actions.run_and_verify_switch(sbox.wc_dir, sbox.ospath(''), branch_url,
                                        None, expected_disk, expected_status)
| true | true |
f71ec7be2b5c89172f80a586dc9687e6c94dc760 | 14,797 | py | Python | l3/plot.py | dominique120/12-steps-navier-stokes | 3e195bf7f7895f83f5f2248ef48dc13b76e8b5de | [
"MIT"
] | null | null | null | l3/plot.py | dominique120/12-steps-navier-stokes | 3e195bf7f7895f83f5f2248ef48dc13b76e8b5de | [
"MIT"
] | null | null | null | l3/plot.py | dominique120/12-steps-navier-stokes | 3e195bf7f7895f83f5f2248ef48dc13b76e8b5de | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Plot the current solution u, the previous step un, and the analytical
solution on the spatial grid x in [0, 2*pi].

Fixes over the generated original: the (identical) x grid is defined
once instead of three times, each curve gets a distinguishable style so
the legend is meaningful, the x axis is labelled as the spatial
coordinate, and the figure is closed after saving.
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np

# Consistent serif look for all text elements.
matplotlib.rcParams["font.family"] = "Serif"
matplotlib.rcParams["font.size"] = 10
matplotlib.rcParams["axes.labelsize"] = 10
matplotlib.rcParams["xtick.labelsize"] = 10
matplotlib.rcParams["ytick.labelsize"] = 10
matplotlib.rcParams["legend.fontsize"] = 10

fig = plt.figure(facecolor="white")
ax = fig.gca()
ax.grid()
ax.set_axisbelow(True)
# BUG FIX: the abscissa is the spatial grid (0 .. 2*pi), not a timestep index.
ax.set_xlabel("x")
ax.set_title("Plot of u over time")

# The spatial grid is identical for all three curves; define it once.
x = np.array([0.0000000000000000E+00,0.6283185482025147E-01,0.1256637096405029E+00,0.1884955644607544E+00,0.2513274192810059E+00,0.3141592741012573E+00,0.3769911289215088E+00,0.4398229837417603E+00,0.5026548385620118E+00,0.5654866933822632E+00,0.6283185482025146E+00,0.6911504030227662E+00,0.7539822578430176E+00,0.8168141126632691E+00,0.8796459674835206E+00,0.9424778223037721E+00,0.1005309677124024E+01,0.1068141531944275E+01,0.1130973386764526E+01,0.1193805241584778E+01,0.1256637096405029E+01,0.1319468951225281E+01,0.1382300806045532E+01,0.1445132660865784E+01,0.1507964515686035E+01,0.1570796370506287E+01,0.1633628225326538E+01,0.1696460080146790E+01,0.1759291934967041E+01,0.1822123789787293E+01,0.1884955644607544E+01,0.1947787499427795E+01,0.2010619354248047E+01,0.2073451209068299E+01,0.2136283063888550E+01,0.2199114918708801E+01,0.2261946773529053E+01,0.2324778628349304E+01,0.2387610483169556E+01,0.2450442337989807E+01,0.2513274192810059E+01,0.2576106047630310E+01,0.2638937902450562E+01,0.2701769757270813E+01,0.2764601612091065E+01,0.2827433466911316E+01,0.2890265321731567E+01,0.2953097176551819E+01,0.3015929031372071E+01,0.3078760886192322E+01,0.3141592741012574E+01,0.3204424595832825E+01,0.3267256450653076E+01,0.3330088305473328E+01,0.3392920160293579E+01,0.3455752015113831E+01,0.3518583869934083E+01,0.3581415724754334E+01,0.3644247579574585E+01,0.3707079434394837E+01,0.3769911289215088E+01,0.3832743144035340E+01,0.3895574998855591E+01,0.3958406853675843E+01,0.4021238708496094E+01,0.4084070563316345E+01,0.4146902418136597E+01,0.4209734272956848E+01,0.4272566127777100E+01,0.4335397982597351E+01,0.4398229837417603E+01,0.4461061692237855E+01,0.4523893547058106E+01,0.4586725401878358E+01,0.4649557256698609E+01,0.4712389111518860E+01,0.4775220966339112E+01,0.4838052821159363E+01,0.4900884675979615E+01,0.4963716530799866E+01,0.5026548385620117E+01,0.5089380240440369E+01,0.5152212095260620E+01,0.5215043950080872E+01,0.5277875804901123E+01,0.5340707659721375E+01,0.5403539514541627E+01,0.5466371369361878E+01,0.5529203224182130E+01,0.5592035079002381E+01,0.5654866933822633E+01,0.5717698788642884E+01,0.5780530643463135E+01,0.5843362498283387E+01,0.5906194353103638E+01,0.5969026207923890E+01,0.6031858062744141E+01,0.6094689917564392E+01,0.6157521772384644E+01,0.6220353627204895E+01,0.6283185482025147E+01])

y_u = np.array([0.2727630972961541E+01,0.2771311269919911E+01,0.2814989285728558E+01,0.2858665849416731E+01,0.2902342189881139E+01,0.2946020124173951E+01,0.2989702300073073E+01,0.3033392493052979E+01,0.3077095949208672E+01,0.3120819754014078E+01,0.3164573192804217E+01,0.3208368054247055E+01,0.3252218815413303E+01,0.3296142639622408E+01,0.3340159119445302E+01,0.3384289709865549E+01,0.3428556821944811E+01,0.3472982584474947E+01,0.3517587326469731E+01,0.3562387880944926E+01,0.3607395852618360E+01,0.3652616021029425E+01,0.3698045059580436E+01,0.3743670736415854E+01,0.3789471724920397E+01,0.3835418093903271E+01,0.3881472477500543E+01,0.3927591851755142E+01,0.3973729778475908E+01,0.4019838926034867E+01,0.4065873647669411E+01,0.4111792394000463E+01,0.4157559757884302E+01,0.4203147993161763E+01,0.4248537908375252E+01,0.4293719104179188E+01,0.4338689590214909E+01,0.4383454875298046E+01,0.4428026667143412E+01,0.4472421340415166E+01,0.4516658333683488E+01,0.4560758619056785E+01,0.4604743357507668E+01,0.4648632814346878E+01,0.4692445569290600E+01,0.4736198019615245E+01,0.4779904146861352E+01,0.4823575499325555E+01,0.4867221334189969E+01,0.4910848863158982E+01,0.4954463551611633E+01,0.4998069430935068E+01,0.5041669394396616E+01,0.5085265456440628E+01,0.5128858961580264E+01,0.5172450729440230E+01,0.5216041112137861E+01,0.5259629908416969E+01,0.5303216001391483E+01,0.5346796407577470E+01,0.5390364016956128E+01,0.5433902378991060E+01,0.5477373796984951E+01,0.5520692266994955E+01,0.5563662148157763E+01,0.5605839543752792E+01,0.5646220063907935E+01,0.5682539434358074E+01,0.5709723746298654E+01,0.5716534126277256E+01,0.5678680622132141E+01,0.5546490436649654E+01,0.5230962954575717E+01,0.4621610920896421E+01,0.3723767827144663E+01,0.2832739161616575E+01,0.2256379481270987E+01,0.1991598349454642E+01,0.1903149542455375E+01,0.1893699549180197E+01,0.1916482260037323E+01,0.1951969308086713E+01,0.1992407559120753E+01,0.2034770635503815E+01,0.2077882429362218E+01,0.2121285847116006E+01,0.2164802982375364E+01,0.2208364483935747E+01,0.2251943284280958E+01,0.2295528801406756E+01,0.2339116851026652E+01,0.2382705607390911E+01,0.2426293674842139E+01,0.2469878109399315E+01,0.2513449360728478E+01,0.2556974307303974E+01,0.2600337207538654E+01,0.2643133334588122E+01,0.2683947826158554E+01,0.2717840319099818E+01,0.2727630972961541E+01])

y_un = np.array([0.2736023075220947E+01,0.2779837164545836E+01,0.2823649045509257E+01,0.2867459600164973E+01,0.2911270141699394E+01,0.2955082615916199E+01,0.2998899859719867E+01,0.3042725915794919E+01,0.3086566393194455E+01,0.3130428850827616E+01,0.3174323165831663E+01,0.3218261833478449E+01,0.3262260132502555E+01,0.3306336083104996E+01,0.3350510127978347E+01,0.3394804482298916E+01,0.3439242127774091E+01,0.3483845467038330E+01,0.3528634703635678E+01,0.3573626062661065E+01,0.3618830009499543E+01,0.3664249650698246E+01,0.3709879505336173E+01,0.3755704814144340E+01,0.3801701508065813E+01,0.3847836893073130E+01,0.3894071032297831E+01,0.3940358730093159E+01,0.3986651955855555E+01,0.4032902497084348E+01,0.4079064607364045E+01,0.4125097418387960E+01,0.4170966914845689E+01,0.4216647322613483E+01,0.4262121827104219E+01,0.4307382610971990E+01,0.4352430269224261E+01,0.4397272716621099E+01,0.4441923740614247E+01,0.4486401369648815E+01,0.4530726221531711E+01,0.4574919973083936E+01,0.4619004056115172E+01,0.4662998642689007E+01,0.4706921941299771E+01,0.4750789790286371E+01,0.4794615508938744E+01,0.4838409951549534E+01,0.4882181704496955E+01,0.4925937369356523E+01,0.4969681883387789E+01,0.5013418839774710E+01,0.5057150781228563E+01,0.5100879449862473E+01,0.5144605981558732E+01,0.5188331031510512E+01,0.5232054803586364E+01,0.5275776916902560E+01,0.5319495949105908E+01,0.5363208279897092E+01,0.5406905366405161E+01,0.5450567464917623E+01,0.5494149280945163E+01,0.5537547298853238E+01,0.5580525607569766E+01,0.5622547965846030E+01,0.5662399018478118E+01,0.5697335399227216E+01,0.5721207407206272E+01,0.5720417383492038E+01,0.5665776539520547E+01,0.5498721631018602E+01,0.5119674798334109E+01,0.4425781384204654E+01,0.3486434650868969E+01,0.2650763109373032E+01,0.2160209517510828E+01,0.1951643566665702E+01,0.1890046900079447E+01,0.1891964276681970E+01,0.1919388886466590E+01,0.1956790527577951E+01,0.1998063161314609E+01,0.2040835692245308E+01,0.2084189902896408E+01,0.2127769988406669E+01,0.2171437866684961E+01,0.2215139874588584E+01,0.2258855136134518E+01,0.2302575516435228E+01,0.2346297799758990E+01,0.2390020545982250E+01,0.2433742519833124E+01,0.2477460863766740E+01,0.2521166124486849E+01,0.2564825375231745E+01,0.2608323275028774E+01,0.2651255604219204E+01,0.2692206176828820E+01,0.2726219167417408E+01,0.2736023075220947E+01])

y_exact = np.array([0.2778119282693222E+01,0.2821757879554102E+01,0.2865396476414983E+01,0.2909035073275863E+01,0.2952673670136744E+01,0.2996312266997624E+01,0.3039950863858505E+01,0.3083589460719385E+01,0.3127228057580266E+01,0.3170866654441146E+01,0.3214505251302026E+01,0.3258143848162907E+01,0.3301782445023787E+01,0.3345421041884667E+01,0.3389059638745548E+01,0.3432698235606428E+01,0.3476336832467309E+01,0.3519975429328189E+01,0.3563614026189069E+01,0.3607252623049950E+01,0.3650891219910831E+01,0.3694529816771711E+01,0.3738168413632591E+01,0.3781807010493472E+01,0.3825445607354352E+01,0.3869084204215233E+01,0.3912722801076113E+01,0.3956361397936993E+01,0.3999999994797874E+01,0.4043638591658754E+01,0.4087277188519635E+01,0.4130915785380515E+01,0.4174554382241396E+01,0.4218192979102276E+01,0.4261831575963156E+01,0.4305470172824037E+01,0.4349108769684917E+01,0.4392747366545797E+01,0.4436385963406678E+01,0.4480024560267559E+01,0.4523663157128439E+01,0.4567301753989319E+01,0.4610940350850200E+01,0.4654578947711080E+01,0.4698217544571961E+01,0.4741856141432841E+01,0.4785494738293721E+01,0.4829133335154602E+01,0.4872771932015482E+01,0.4916410528876363E+01,0.4960049125737243E+01,0.5003687722598123E+01,0.5047326319459004E+01,0.5090964916319884E+01,0.5134603513180765E+01,0.5178242110041645E+01,0.5221880706902526E+01,0.5265519303763406E+01,0.5309157900624286E+01,0.5352796497485167E+01,0.5396435094346045E+01,0.5440073691206913E+01,0.5483712288067701E+01,0.5527350884927927E+01,0.5570989481784174E+01,0.5614628078612207E+01,0.5658266675240245E+01,0.5701905270450581E+01,0.5745543855611327E+01,0.5789182369533984E+01,0.5832820378474495E+01,0.5876454807775165E+01,0.5920063862467133E+01,0.5963493056546985E+01,0.6005647845264028E+01,0.6038797098376929E+01,0.6009502348067070E+01,0.5598786339877734E+01,0.3999997337339063E+01,0.2401211415134112E+01,0.1990497346934744E+01,0.1961202951946962E+01,0.1994352257161830E+01,0.2036507053269433E+01,0.2079936248392683E+01,0.2123545303231861E+01,0.2167179732553298E+01,0.2210817741496738E+01,0.2254456255419809E+01,0.2298094840580612E+01,0.2341733435790958E+01,0.2385372032418996E+01,0.2429010629247029E+01,0.2472649226103276E+01,0.2516287822963503E+01,0.2559926419824292E+01,0.2603565016685159E+01,0.2647203613546037E+01,0.2690842210406918E+01,0.2734480807267798E+01,0.2778119404128678E+01])

# BUG FIX: all three curves were drawn with the identical format "b-o",
# which made them indistinguishable despite the legend; give each curve
# its own color and marker.
ax.plot(x, y_u, "b-o", linewidth=1, markersize=3, label="value of u")
ax.plot(x, y_un, "r-s", linewidth=1, markersize=3, label="value of un")
ax.plot(x, y_exact, "g-^", linewidth=1, markersize=3, label="value of u_analytical")
ax.legend(loc="best")
plt.savefig("plot.png", dpi=320)
plt.close(fig)  # release the figure so repeated runs don't accumulate
| 369.925 | 2,338 | 0.859904 |
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
matplotlib.rcParams["font.family"] = "Serif"
matplotlib.rcParams["font.size"] = 10
matplotlib.rcParams["axes.labelsize"] = 10
matplotlib.rcParams["xtick.labelsize"] = 10
matplotlib.rcParams["ytick.labelsize"] = 10
matplotlib.rcParams["legend.fontsize"] = 10
fig = plt.figure(facecolor="white")
ax = fig.gca()
ax.grid()
ax.set_axisbelow(True)
ax.set_xlabel("timestep")
ax.set_title("Plot of u over time")
x = np.array([0.0000000000000000E+00,0.6283185482025147E-01,0.1256637096405029E+00,0.1884955644607544E+00,0.2513274192810059E+00,0.3141592741012573E+00,0.3769911289215088E+00,0.4398229837417603E+00,0.5026548385620118E+00,0.5654866933822632E+00,0.6283185482025146E+00,0.6911504030227662E+00,0.7539822578430176E+00,0.8168141126632691E+00,0.8796459674835206E+00,0.9424778223037721E+00,0.1005309677124024E+01,0.1068141531944275E+01,0.1130973386764526E+01,0.1193805241584778E+01,0.1256637096405029E+01,0.1319468951225281E+01,0.1382300806045532E+01,0.1445132660865784E+01,0.1507964515686035E+01,0.1570796370506287E+01,0.1633628225326538E+01,0.1696460080146790E+01,0.1759291934967041E+01,0.1822123789787293E+01,0.1884955644607544E+01,0.1947787499427795E+01,0.2010619354248047E+01,0.2073451209068299E+01,0.2136283063888550E+01,0.2199114918708801E+01,0.2261946773529053E+01,0.2324778628349304E+01,0.2387610483169556E+01,0.2450442337989807E+01,0.2513274192810059E+01,0.2576106047630310E+01,0.2638937902450562E+01,0.2701769757270813E+01,0.2764601612091065E+01,0.2827433466911316E+01,0.2890265321731567E+01,0.2953097176551819E+01,0.3015929031372071E+01,0.3078760886192322E+01,0.3141592741012574E+01,0.3204424595832825E+01,0.3267256450653076E+01,0.3330088305473328E+01,0.3392920160293579E+01,0.3455752015113831E+01,0.3518583869934083E+01,0.3581415724754334E+01,0.3644247579574585E+01,0.3707079434394837E+01,0.3769911289215088E+01,0.3832743144035340E+01,0.3895574998855591E+01,0.3958406853675843E+01,0.4021238708496094E+01,0.4084070563316345E+01,0.4146902418136597E+01,0.4209734272956848E+01,0.4272566127777100E+01,0.4335397982597351E+01,0.4398229837417603E+01,0.4461061692237855E+01,0.4523893547058106E+01,0.4586725401878358E+01,0.4649557256698609E+01,0.4712389111518860E+01,0.4775220966339112E+01,0.4838052821159363E+01,0.4900884675979615E+01,0.4963716530799866E+01,0.5026548385620117E+01,0.5089380240440369E+01,0.5152212095260620E+01,0.5215043950080872E+01,0.5277875804901123E+01,0.5340707659721375E+01,0.540353
9514541627E+01,0.5466371369361878E+01,0.5529203224182130E+01,0.5592035079002381E+01,0.5654866933822633E+01,0.5717698788642884E+01,0.5780530643463135E+01,0.5843362498283387E+01,0.5906194353103638E+01,0.5969026207923890E+01,0.6031858062744141E+01,0.6094689917564392E+01,0.6157521772384644E+01,0.6220353627204895E+01,0.6283185482025147E+01])
y = np.array([0.2727630972961541E+01,0.2771311269919911E+01,0.2814989285728558E+01,0.2858665849416731E+01,0.2902342189881139E+01,0.2946020124173951E+01,0.2989702300073073E+01,0.3033392493052979E+01,0.3077095949208672E+01,0.3120819754014078E+01,0.3164573192804217E+01,0.3208368054247055E+01,0.3252218815413303E+01,0.3296142639622408E+01,0.3340159119445302E+01,0.3384289709865549E+01,0.3428556821944811E+01,0.3472982584474947E+01,0.3517587326469731E+01,0.3562387880944926E+01,0.3607395852618360E+01,0.3652616021029425E+01,0.3698045059580436E+01,0.3743670736415854E+01,0.3789471724920397E+01,0.3835418093903271E+01,0.3881472477500543E+01,0.3927591851755142E+01,0.3973729778475908E+01,0.4019838926034867E+01,0.4065873647669411E+01,0.4111792394000463E+01,0.4157559757884302E+01,0.4203147993161763E+01,0.4248537908375252E+01,0.4293719104179188E+01,0.4338689590214909E+01,0.4383454875298046E+01,0.4428026667143412E+01,0.4472421340415166E+01,0.4516658333683488E+01,0.4560758619056785E+01,0.4604743357507668E+01,0.4648632814346878E+01,0.4692445569290600E+01,0.4736198019615245E+01,0.4779904146861352E+01,0.4823575499325555E+01,0.4867221334189969E+01,0.4910848863158982E+01,0.4954463551611633E+01,0.4998069430935068E+01,0.5041669394396616E+01,0.5085265456440628E+01,0.5128858961580264E+01,0.5172450729440230E+01,0.5216041112137861E+01,0.5259629908416969E+01,0.5303216001391483E+01,0.5346796407577470E+01,0.5390364016956128E+01,0.5433902378991060E+01,0.5477373796984951E+01,0.5520692266994955E+01,0.5563662148157763E+01,0.5605839543752792E+01,0.5646220063907935E+01,0.5682539434358074E+01,0.5709723746298654E+01,0.5716534126277256E+01,0.5678680622132141E+01,0.5546490436649654E+01,0.5230962954575717E+01,0.4621610920896421E+01,0.3723767827144663E+01,0.2832739161616575E+01,0.2256379481270987E+01,0.1991598349454642E+01,0.1903149542455375E+01,0.1893699549180197E+01,0.1916482260037323E+01,0.1951969308086713E+01,0.1992407559120753E+01,0.2034770635503815E+01,0.2077882429362218E+01,0.2121285847116006E+01,0.216480
2982375364E+01,0.2208364483935747E+01,0.2251943284280958E+01,0.2295528801406756E+01,0.2339116851026652E+01,0.2382705607390911E+01,0.2426293674842139E+01,0.2469878109399315E+01,0.2513449360728478E+01,0.2556974307303974E+01,0.2600337207538654E+01,0.2643133334588122E+01,0.2683947826158554E+01,0.2717840319099818E+01,0.2727630972961541E+01])
ax.plot(x,y,"b-o",linewidth=1,markersize=3,label="value of u")
x = np.array([0.0000000000000000E+00,0.6283185482025147E-01,0.1256637096405029E+00,0.1884955644607544E+00,0.2513274192810059E+00,0.3141592741012573E+00,0.3769911289215088E+00,0.4398229837417603E+00,0.5026548385620118E+00,0.5654866933822632E+00,0.6283185482025146E+00,0.6911504030227662E+00,0.7539822578430176E+00,0.8168141126632691E+00,0.8796459674835206E+00,0.9424778223037721E+00,0.1005309677124024E+01,0.1068141531944275E+01,0.1130973386764526E+01,0.1193805241584778E+01,0.1256637096405029E+01,0.1319468951225281E+01,0.1382300806045532E+01,0.1445132660865784E+01,0.1507964515686035E+01,0.1570796370506287E+01,0.1633628225326538E+01,0.1696460080146790E+01,0.1759291934967041E+01,0.1822123789787293E+01,0.1884955644607544E+01,0.1947787499427795E+01,0.2010619354248047E+01,0.2073451209068299E+01,0.2136283063888550E+01,0.2199114918708801E+01,0.2261946773529053E+01,0.2324778628349304E+01,0.2387610483169556E+01,0.2450442337989807E+01,0.2513274192810059E+01,0.2576106047630310E+01,0.2638937902450562E+01,0.2701769757270813E+01,0.2764601612091065E+01,0.2827433466911316E+01,0.2890265321731567E+01,0.2953097176551819E+01,0.3015929031372071E+01,0.3078760886192322E+01,0.3141592741012574E+01,0.3204424595832825E+01,0.3267256450653076E+01,0.3330088305473328E+01,0.3392920160293579E+01,0.3455752015113831E+01,0.3518583869934083E+01,0.3581415724754334E+01,0.3644247579574585E+01,0.3707079434394837E+01,0.3769911289215088E+01,0.3832743144035340E+01,0.3895574998855591E+01,0.3958406853675843E+01,0.4021238708496094E+01,0.4084070563316345E+01,0.4146902418136597E+01,0.4209734272956848E+01,0.4272566127777100E+01,0.4335397982597351E+01,0.4398229837417603E+01,0.4461061692237855E+01,0.4523893547058106E+01,0.4586725401878358E+01,0.4649557256698609E+01,0.4712389111518860E+01,0.4775220966339112E+01,0.4838052821159363E+01,0.4900884675979615E+01,0.4963716530799866E+01,0.5026548385620117E+01,0.5089380240440369E+01,0.5152212095260620E+01,0.5215043950080872E+01,0.5277875804901123E+01,0.5340707659721375E+01,0.540353
9514541627E+01,0.5466371369361878E+01,0.5529203224182130E+01,0.5592035079002381E+01,0.5654866933822633E+01,0.5717698788642884E+01,0.5780530643463135E+01,0.5843362498283387E+01,0.5906194353103638E+01,0.5969026207923890E+01,0.6031858062744141E+01,0.6094689917564392E+01,0.6157521772384644E+01,0.6220353627204895E+01,0.6283185482025147E+01])
y = np.array([0.2736023075220947E+01,0.2779837164545836E+01,0.2823649045509257E+01,0.2867459600164973E+01,0.2911270141699394E+01,0.2955082615916199E+01,0.2998899859719867E+01,0.3042725915794919E+01,0.3086566393194455E+01,0.3130428850827616E+01,0.3174323165831663E+01,0.3218261833478449E+01,0.3262260132502555E+01,0.3306336083104996E+01,0.3350510127978347E+01,0.3394804482298916E+01,0.3439242127774091E+01,0.3483845467038330E+01,0.3528634703635678E+01,0.3573626062661065E+01,0.3618830009499543E+01,0.3664249650698246E+01,0.3709879505336173E+01,0.3755704814144340E+01,0.3801701508065813E+01,0.3847836893073130E+01,0.3894071032297831E+01,0.3940358730093159E+01,0.3986651955855555E+01,0.4032902497084348E+01,0.4079064607364045E+01,0.4125097418387960E+01,0.4170966914845689E+01,0.4216647322613483E+01,0.4262121827104219E+01,0.4307382610971990E+01,0.4352430269224261E+01,0.4397272716621099E+01,0.4441923740614247E+01,0.4486401369648815E+01,0.4530726221531711E+01,0.4574919973083936E+01,0.4619004056115172E+01,0.4662998642689007E+01,0.4706921941299771E+01,0.4750789790286371E+01,0.4794615508938744E+01,0.4838409951549534E+01,0.4882181704496955E+01,0.4925937369356523E+01,0.4969681883387789E+01,0.5013418839774710E+01,0.5057150781228563E+01,0.5100879449862473E+01,0.5144605981558732E+01,0.5188331031510512E+01,0.5232054803586364E+01,0.5275776916902560E+01,0.5319495949105908E+01,0.5363208279897092E+01,0.5406905366405161E+01,0.5450567464917623E+01,0.5494149280945163E+01,0.5537547298853238E+01,0.5580525607569766E+01,0.5622547965846030E+01,0.5662399018478118E+01,0.5697335399227216E+01,0.5721207407206272E+01,0.5720417383492038E+01,0.5665776539520547E+01,0.5498721631018602E+01,0.5119674798334109E+01,0.4425781384204654E+01,0.3486434650868969E+01,0.2650763109373032E+01,0.2160209517510828E+01,0.1951643566665702E+01,0.1890046900079447E+01,0.1891964276681970E+01,0.1919388886466590E+01,0.1956790527577951E+01,0.1998063161314609E+01,0.2040835692245308E+01,0.2084189902896408E+01,0.2127769988406669E+01,0.217143
7866684961E+01,0.2215139874588584E+01,0.2258855136134518E+01,0.2302575516435228E+01,0.2346297799758990E+01,0.2390020545982250E+01,0.2433742519833124E+01,0.2477460863766740E+01,0.2521166124486849E+01,0.2564825375231745E+01,0.2608323275028774E+01,0.2651255604219204E+01,0.2692206176828820E+01,0.2726219167417408E+01,0.2736023075220947E+01])
ax.plot(x,y,"b-o",linewidth=1,markersize=3,label="value of un")
x = np.array([0.0000000000000000E+00,0.6283185482025147E-01,0.1256637096405029E+00,0.1884955644607544E+00,0.2513274192810059E+00,0.3141592741012573E+00,0.3769911289215088E+00,0.4398229837417603E+00,0.5026548385620118E+00,0.5654866933822632E+00,0.6283185482025146E+00,0.6911504030227662E+00,0.7539822578430176E+00,0.8168141126632691E+00,0.8796459674835206E+00,0.9424778223037721E+00,0.1005309677124024E+01,0.1068141531944275E+01,0.1130973386764526E+01,0.1193805241584778E+01,0.1256637096405029E+01,0.1319468951225281E+01,0.1382300806045532E+01,0.1445132660865784E+01,0.1507964515686035E+01,0.1570796370506287E+01,0.1633628225326538E+01,0.1696460080146790E+01,0.1759291934967041E+01,0.1822123789787293E+01,0.1884955644607544E+01,0.1947787499427795E+01,0.2010619354248047E+01,0.2073451209068299E+01,0.2136283063888550E+01,0.2199114918708801E+01,0.2261946773529053E+01,0.2324778628349304E+01,0.2387610483169556E+01,0.2450442337989807E+01,0.2513274192810059E+01,0.2576106047630310E+01,0.2638937902450562E+01,0.2701769757270813E+01,0.2764601612091065E+01,0.2827433466911316E+01,0.2890265321731567E+01,0.2953097176551819E+01,0.3015929031372071E+01,0.3078760886192322E+01,0.3141592741012574E+01,0.3204424595832825E+01,0.3267256450653076E+01,0.3330088305473328E+01,0.3392920160293579E+01,0.3455752015113831E+01,0.3518583869934083E+01,0.3581415724754334E+01,0.3644247579574585E+01,0.3707079434394837E+01,0.3769911289215088E+01,0.3832743144035340E+01,0.3895574998855591E+01,0.3958406853675843E+01,0.4021238708496094E+01,0.4084070563316345E+01,0.4146902418136597E+01,0.4209734272956848E+01,0.4272566127777100E+01,0.4335397982597351E+01,0.4398229837417603E+01,0.4461061692237855E+01,0.4523893547058106E+01,0.4586725401878358E+01,0.4649557256698609E+01,0.4712389111518860E+01,0.4775220966339112E+01,0.4838052821159363E+01,0.4900884675979615E+01,0.4963716530799866E+01,0.5026548385620117E+01,0.5089380240440369E+01,0.5152212095260620E+01,0.5215043950080872E+01,0.5277875804901123E+01,0.5340707659721375E+01,0.540353
9514541627E+01,0.5466371369361878E+01,0.5529203224182130E+01,0.5592035079002381E+01,0.5654866933822633E+01,0.5717698788642884E+01,0.5780530643463135E+01,0.5843362498283387E+01,0.5906194353103638E+01,0.5969026207923890E+01,0.6031858062744141E+01,0.6094689917564392E+01,0.6157521772384644E+01,0.6220353627204895E+01,0.6283185482025147E+01])
y = np.array([0.2778119282693222E+01,0.2821757879554102E+01,0.2865396476414983E+01,0.2909035073275863E+01,0.2952673670136744E+01,0.2996312266997624E+01,0.3039950863858505E+01,0.3083589460719385E+01,0.3127228057580266E+01,0.3170866654441146E+01,0.3214505251302026E+01,0.3258143848162907E+01,0.3301782445023787E+01,0.3345421041884667E+01,0.3389059638745548E+01,0.3432698235606428E+01,0.3476336832467309E+01,0.3519975429328189E+01,0.3563614026189069E+01,0.3607252623049950E+01,0.3650891219910831E+01,0.3694529816771711E+01,0.3738168413632591E+01,0.3781807010493472E+01,0.3825445607354352E+01,0.3869084204215233E+01,0.3912722801076113E+01,0.3956361397936993E+01,0.3999999994797874E+01,0.4043638591658754E+01,0.4087277188519635E+01,0.4130915785380515E+01,0.4174554382241396E+01,0.4218192979102276E+01,0.4261831575963156E+01,0.4305470172824037E+01,0.4349108769684917E+01,0.4392747366545797E+01,0.4436385963406678E+01,0.4480024560267559E+01,0.4523663157128439E+01,0.4567301753989319E+01,0.4610940350850200E+01,0.4654578947711080E+01,0.4698217544571961E+01,0.4741856141432841E+01,0.4785494738293721E+01,0.4829133335154602E+01,0.4872771932015482E+01,0.4916410528876363E+01,0.4960049125737243E+01,0.5003687722598123E+01,0.5047326319459004E+01,0.5090964916319884E+01,0.5134603513180765E+01,0.5178242110041645E+01,0.5221880706902526E+01,0.5265519303763406E+01,0.5309157900624286E+01,0.5352796497485167E+01,0.5396435094346045E+01,0.5440073691206913E+01,0.5483712288067701E+01,0.5527350884927927E+01,0.5570989481784174E+01,0.5614628078612207E+01,0.5658266675240245E+01,0.5701905270450581E+01,0.5745543855611327E+01,0.5789182369533984E+01,0.5832820378474495E+01,0.5876454807775165E+01,0.5920063862467133E+01,0.5963493056546985E+01,0.6005647845264028E+01,0.6038797098376929E+01,0.6009502348067070E+01,0.5598786339877734E+01,0.3999997337339063E+01,0.2401211415134112E+01,0.1990497346934744E+01,0.1961202951946962E+01,0.1994352257161830E+01,0.2036507053269433E+01,0.2079936248392683E+01,0.2123545303231861E+01,0.216717
9732553298E+01,0.2210817741496738E+01,0.2254456255419809E+01,0.2298094840580612E+01,0.2341733435790958E+01,0.2385372032418996E+01,0.2429010629247029E+01,0.2472649226103276E+01,0.2516287822963503E+01,0.2559926419824292E+01,0.2603565016685159E+01,0.2647203613546037E+01,0.2690842210406918E+01,0.2734480807267798E+01,0.2778119404128678E+01])
ax.plot(x,y,"b-o",linewidth=1,markersize=3,label="value of u_analytical")
ax.legend(loc="best")
plt.savefig("plot.png", dpi=320)
| true | true |
f71ec8c0f4f79e7b5d54635479768ae036e669e3 | 251 | py | Python | CodeChef/JUNE20/EOEO.py | mishrakeshav/Competitive-Programming | b25dcfeec0fb9a9c71bf3a05644b619f4ca83dd2 | [
"MIT"
] | 2 | 2020-06-25T21:10:32.000Z | 2020-12-10T06:53:45.000Z | CodeChef/JUNE20/EOEO.py | mishrakeshav/Competitive-Programming | b25dcfeec0fb9a9c71bf3a05644b619f4ca83dd2 | [
"MIT"
] | null | null | null | CodeChef/JUNE20/EOEO.py | mishrakeshav/Competitive-Programming | b25dcfeec0fb9a9c71bf3a05644b619f4ca83dd2 | [
"MIT"
] | 3 | 2020-05-15T14:17:09.000Z | 2021-07-25T13:18:20.000Z |
def solve(ts):
    """Return the answer for one test case: half of the odd part of ts.

    Stripping all factors of two leaves the odd part m; the answer is m // 2.
    Returns 0 for ts == 0 (the original loop would spin forever there, since
    0 % 2 == 0 and 0 // 2 == 0).
    """
    if ts == 0:
        return 0
    while ts % 2 == 0:
        ts //= 2
    return ts // 2


if __name__ == '__main__':
    # one line of input per test case; first line is the number of test cases
    for _ in range(int(input())):
        print(solve(int(input())))
| 13.944444 | 33 | 0.346614 |
if __name__ == '__main__':
for t in range(int(input())):
ts = int(input())
count = 0
while ts%2 == 0:
ts //= 2
if ts:
print(ts//2)
else:
print(0)
| true | true |
f71ec943bd7cd66acb6291dadc6fac09bcc5cefa | 6,787 | py | Python | ryu/cmd/rpc_cli.py | umkcdcrg01/ryu_openflow | 37ed5b88f7d119344e07c95314a7450235c037a8 | [
"Apache-2.0"
] | 1 | 2019-05-21T12:05:57.000Z | 2019-05-21T12:05:57.000Z | ryu/cmd/rpc_cli.py | umkcdcrg01/ryu_openflow | 37ed5b88f7d119344e07c95314a7450235c037a8 | [
"Apache-2.0"
] | 1 | 2019-03-07T02:48:16.000Z | 2019-03-07T02:48:16.000Z | ryu/cmd/rpc_cli.py | umkcdcrg01/ryu_openflow | 37ed5b88f7d119344e07c95314a7450235c037a8 | [
"Apache-2.0"
] | 1 | 2019-03-10T09:32:45.000Z | 2019-03-10T09:32:45.000Z | #!/usr/bin/env python
#
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# a simple command line msgpack-rpc client
#
# a usage example:
# % PYTHONPATH=. ./bin/rpc-cli \
# --peers=echo-server=localhost:9999,hoge=localhost:9998
# (Cmd) request echo-server echo ["hoge"]
# RESULT hoge
# (Cmd) request echo-server notify ["notify-method", ["param1","param2"]]
# RESULT notify-method
# (Cmd)
# NOTIFICATION from echo-server ['notify-method', ['param1', 'param2']]
# (Cmd)
import ryu.contrib
ryu.contrib.update_module_path()
from ryu import cfg
import cmd
import signal
import socket
import sys
import termios
from ryu.lib import rpc
CONF = cfg.CONF
CONF.register_cli_opts([
# eg. rpc-cli --peers=hoge=localhost:9998,fuga=localhost:9999
cfg.ListOpt('peers', default=[], help='list of peers')
])
class Peer(object):
    """A msgpack-rpc peer: lazily maintains a client connection to (host, port)
    and transparently reconnects/retries once when a request hits a dead link.
    """

    def __init__(self, name, addr):
        self._name = name
        self._addr = addr
        self.client = None
        # Best-effort initial connection; failure is not fatal because
        # try_to_connect() re-attempts before every request.
        try:
            self.connect()
        except Exception:
            # was a bare "except:", which would also have swallowed
            # KeyboardInterrupt/SystemExit; only ordinary errors are ignored.
            pass

    def connect(self):
        """(Re)establish the TCP connection and wrap it in an rpc.Client."""
        self.client = None
        s = socket.create_connection(self._addr)
        self.client = rpc.Client(s, notification_callback=self.notification)

    def try_to_connect(self, verbose=False):
        """Connect if not already connected; raise EOFError on failure."""
        if self.client:
            return
        try:
            self.connect()
            assert self.client
        except Exception as e:
            if verbose:
                print("connection failure %s" % e)
            raise EOFError

    def notification(self, n):
        """Callback invoked by rpc.Client when a notification arrives."""
        print("NOTIFICATION from %s %s" % (self._name, n))

    def call(self, method, params):
        """Send a request and return the peer's result (may reconnect once)."""
        return self._do(lambda: self.client.call(method, params))

    def send_notification(self, method, params):
        """Send a one-way notification (may reconnect once)."""
        self._do(lambda: self.client.send_notification(method, params))

    def _do(self, f):
        """Run f(); on EOF, drop the client, reconnect once, and retry."""
        def g():
            try:
                return f()
            except EOFError:
                self.client = None
                raise
        self.try_to_connect(verbose=True)
        try:
            return g()
        except EOFError:
            print("disconnected. trying to connect...")
            self.try_to_connect(verbose=True)
            print("connected. retrying the request...")
            return g()
peers = {}
def add_peer(name, host, port):
    """Create a Peer for (host, port) and register it in the module-level
    ``peers`` registry under ``name``.

    ``port`` may be a string (e.g. as split from "host:port"); it is passed
    through to socket.create_connection, whose address resolution accepts
    numeric-string service values.
    """
    peers[name] = Peer(name, (host, port))
class Cmd(cmd.Cmd):
    """Interactive shell for issuing msgpack-rpc requests/notifications.

    While the shell is idle at the prompt, a SIGALRM-driven timer
    periodically polls every connected peer so asynchronous notifications
    get printed between commands.
    """

    def __init__(self, *args, **kwargs):
        self._in_onecmd = False
        self._notification_check_interval = 1  # seconds; worth to be configurable?
        self._saved_termios = None
        cmd.Cmd.__init__(self, *args, **kwargs)

    def _request(self, line, f):
        """Parse "<peer> <method> <params>" from line and apply
        f(peer_obj, method, params), reporting RPC/connection errors."""
        args = line.split(None, 2)
        try:
            peer = args[0]
            method = args[1]
            # NOTE: eval() of operator-typed text; acceptable for a local
            # debug CLI, but this must never be fed untrusted input.
            params = eval(args[2])
        except Exception:
            # was a bare "except:"; narrowed so Ctrl-C is not swallowed here
            print("argument error")
            return
        try:
            p = peers[peer]
        except KeyError:
            print("unknown peer %s" % peer)
            return
        try:
            f(p, method, params)
        except rpc.RPCError as e:
            print("RPC ERROR %s" % e)
        except EOFError:
            print("disconnected")

    def _complete_peer(self, text, line, _begidx, _endidx):
        """Tab-complete the peer name (only for the first argument)."""
        if len((line + 'x').split()) >= 3:
            return []
        return [name for name in peers if name.startswith(text)]

    def do_request(self, line):
        """request <peer> <method> <params>
        send a msgpack-rpc request and print a response.
        <params> is a python code snippet, it should be eval'ed to a list.
        """
        def f(p, method, params):
            result = p.call(method, params)
            print("RESULT %s" % result)
        self._request(line, f)

    def do_notify(self, line):
        """notify <peer> <method> <params>
        send a msgpack-rpc notification.
        <params> is a python code snippet, it should be eval'ed to a list.
        """
        def f(p, method, params):
            p.send_notification(method, params)
        self._request(line, f)

    def complete_request(self, text, line, begidx, endidx):
        return self._complete_peer(text, line, begidx, endidx)

    def complete_notify(self, text, line, begidx, endidx):
        return self._complete_peer(text, line, begidx, endidx)

    def do_EOF(self, _line):
        sys.exit(0)

    def emptyline(self):
        self._peek_notification()

    def postcmd(self, _stop, _line):
        self._peek_notification()

    def _peek_notification(self):
        """Poll every connected peer once for pending notifications."""
        # items() works on both Python 2 and 3; iteritems() was Python-2-only
        # and broke this otherwise 2/3-compatible module on Python 3.
        for k, p in peers.items():
            if p.client:
                try:
                    p.client.peek_notification()
                except EOFError:
                    p.client = None
                    print("disconnected %s" % k)

    @staticmethod
    def _save_termios():
        return termios.tcgetattr(sys.stdin.fileno())

    @staticmethod
    def _restore_termios(t):
        termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, t)

    def preloop(self):
        # arm the periodic notification poller before entering the loop
        self._saved_termios = self._save_termios()
        signal.signal(signal.SIGALRM, self._timeout)
        signal.alarm(1)

    def onecmd(self, string):
        # flag command execution so the alarm handler stays out of the way
        self._in_onecmd = True
        try:
            return cmd.Cmd.onecmd(self, string)
        finally:
            self._in_onecmd = False

    def _timeout(self, _sig, _frame):
        if not self._in_onecmd:
            # restore terminal settings. (cooked/raw, ...)
            # required for pypy at least.
            # this doesn't seem to be needed for cpython readline
            # module but i'm not sure if it's by spec or luck.
            o = self._save_termios()
            self._restore_termios(self._saved_termios)
            self._peek_notification()
            self._restore_termios(o)
        signal.alarm(self._notification_check_interval)
signal.alarm(self._notification_check_interval)
def main(args=None, prog=None):
    """Parse the CLI options, register the configured peers, run the shell."""
    CONF(args=args, prog=prog, project='rpc-cli', version='rpc-cli')
    # each --peers entry looks like "name=host:port"
    for spec in CONF.peers:
        peer_name, endpoint = spec.split('=')
        peer_host, peer_port = endpoint.rsplit(':', 1)
        add_peer(peer_name, peer_host, peer_port)
    Cmd().cmdloop()
if __name__ == "__main__":
main()
| 28.39749 | 77 | 0.594519 |
import ryu.contrib
ryu.contrib.update_module_path()
from ryu import cfg
import cmd
import signal
import socket
import sys
import termios
from ryu.lib import rpc
CONF = cfg.CONF
CONF.register_cli_opts([
cfg.ListOpt('peers', default=[], help='list of peers')
])
class Peer(object):
def __init__(self, name, addr):
self._name = name
self._addr = addr
self.client = None
try:
self.connect()
except:
pass
def connect(self):
self.client = None
s = socket.create_connection(self._addr)
self.client = rpc.Client(s, notification_callback=self.notification)
def try_to_connect(self, verbose=False):
if self.client:
return
try:
self.connect()
assert self.client
except Exception as e:
if verbose:
print("connection failure %s" % e)
raise EOFError
def notification(self, n):
print("NOTIFICATION from %s %s" % (self._name, n))
def call(self, method, params):
return self._do(lambda: self.client.call(method, params))
def send_notification(self, method, params):
self._do(lambda: self.client.send_notification(method, params))
def _do(self, f):
def g():
try:
return f()
except EOFError:
self.client = None
raise
self.try_to_connect(verbose=True)
try:
return g()
except EOFError:
print("disconnected. trying to connect...")
self.try_to_connect(verbose=True)
print("connected. retrying the request...")
return g()
peers = {}
def add_peer(name, host, port):
peers[name] = Peer(name, (host, port))
class Cmd(cmd.Cmd):
def __init__(self, *args, **kwargs):
self._in_onecmd = False
self._notification_check_interval = 1
self._saved_termios = None
cmd.Cmd.__init__(self, *args, **kwargs)
def _request(self, line, f):
args = line.split(None, 2)
try:
peer = args[0]
method = args[1]
params = eval(args[2])
except:
print("argument error")
return
try:
p = peers[peer]
except KeyError:
print("unknown peer %s" % peer)
return
try:
f(p, method, params)
except rpc.RPCError as e:
print("RPC ERROR %s" % e)
except EOFError:
print("disconnected")
def _complete_peer(self, text, line, _begidx, _endidx):
if len((line + 'x').split()) >= 3:
return []
return [name for name in peers if name.startswith(text)]
def do_request(self, line):
def f(p, method, params):
result = p.call(method, params)
print("RESULT %s" % result)
self._request(line, f)
def do_notify(self, line):
def f(p, method, params):
p.send_notification(method, params)
self._request(line, f)
def complete_request(self, text, line, begidx, endidx):
return self._complete_peer(text, line, begidx, endidx)
def complete_notify(self, text, line, begidx, endidx):
return self._complete_peer(text, line, begidx, endidx)
def do_EOF(self, _line):
sys.exit(0)
def emptyline(self):
self._peek_notification()
def postcmd(self, _stop, _line):
self._peek_notification()
def _peek_notification(self):
for k, p in peers.iteritems():
if p.client:
try:
p.client.peek_notification()
except EOFError:
p.client = None
print("disconnected %s" % k)
@staticmethod
def _save_termios():
return termios.tcgetattr(sys.stdin.fileno())
@staticmethod
def _restore_termios(t):
termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, t)
def preloop(self):
self._saved_termios = self._save_termios()
signal.signal(signal.SIGALRM, self._timeout)
signal.alarm(1)
def onecmd(self, string):
self._in_onecmd = True
try:
return cmd.Cmd.onecmd(self, string)
finally:
self._in_onecmd = False
def _timeout(self, _sig, _frame):
if not self._in_onecmd:
# module but i'm not sure if it's by spec or luck.
o = self._save_termios()
self._restore_termios(self._saved_termios)
self._peek_notification()
self._restore_termios(o)
signal.alarm(self._notification_check_interval)
def main(args=None, prog=None):
CONF(args=args, prog=prog, project='rpc-cli', version='rpc-cli')
for p_str in CONF.peers:
name, addr = p_str.split('=')
host, port = addr.rsplit(':', 1)
add_peer(name, host, port)
Cmd().cmdloop()
if __name__ == "__main__":
main()
| true | true |
f71ec9cd99fa0dd28a834ab092fb530c42fa878e | 1,503 | py | Python | example/tree/models.py | avaddon/django-polymorphic-tree | 2077bc2b422d11b1bbf9efc0f5f8dcb83a53e550 | [
"Apache-2.0"
] | 13 | 2021-05-12T09:57:17.000Z | 2021-12-27T06:46:26.000Z | example/tree/models.py | avaddon/django-polymorphic-tree | 2077bc2b422d11b1bbf9efc0f5f8dcb83a53e550 | [
"Apache-2.0"
] | null | null | null | example/tree/models.py | avaddon/django-polymorphic-tree | 2077bc2b422d11b1bbf9efc0f5f8dcb83a53e550 | [
"Apache-2.0"
] | 1 | 2022-01-31T14:28:49.000Z | 2022-01-31T14:28:49.000Z | from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from polymorphic_tree.models import PolymorphicMPTTModel, PolymorphicTreeForeignKey
# A base model for the tree:
@python_2_unicode_compatible
class BaseTreeNode(PolymorphicMPTTModel):
    """Concrete polymorphic base for every node type in the example tree."""
    # Self-referential tree link; a null parent marks a root node.
    parent = PolymorphicTreeForeignKey('self', blank=True, null=True, related_name='children', verbose_name=_('parent'))
    title = models.CharField(_("Title"), max_length=200)
    def __str__(self):
        return self.title
    class Meta:
        verbose_name = _("Tree node")
        verbose_name_plural = _("Tree nodes")
# Create 3 derived models for the tree nodes:
class CategoryNode(BaseTreeNode):
    """Tree node presented as a category: adds an opening title and image."""
    opening_title = models.CharField(_("Opening title"), max_length=200)
    opening_image = models.ImageField(_("Opening image"), upload_to='images', blank=True, null=True)
    class Meta:
        verbose_name = _("Category node")
        verbose_name_plural = _("Category nodes")
class TextNode(BaseTreeNode):
    """Node holding free text; configured as a leaf (no children allowed)."""
    extra_text = models.TextField(_('Extra text'))
    # Extra settings: polymorphic_tree flag marking this node type as a leaf.
    can_have_children = False
    class Meta:
        verbose_name = _("Text node")
        verbose_name_plural = _("Text nodes")
class ImageNode(BaseTreeNode):
    """Tree node that stores a single uploaded image."""
    image = models.ImageField(_("Image"), upload_to='images')
    class Meta:
        verbose_name = _("Image node")
        verbose_name_plural = _("Image nodes")
| 30.06 | 120 | 0.72189 | from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from polymorphic_tree.models import PolymorphicMPTTModel, PolymorphicTreeForeignKey
@python_2_unicode_compatible
class BaseTreeNode(PolymorphicMPTTModel):
parent = PolymorphicTreeForeignKey('self', blank=True, null=True, related_name='children', verbose_name=_('parent'))
title = models.CharField(_("Title"), max_length=200)
def __str__(self):
return self.title
class Meta:
verbose_name = _("Tree node")
verbose_name_plural = _("Tree nodes")
class CategoryNode(BaseTreeNode):
opening_title = models.CharField(_("Opening title"), max_length=200)
opening_image = models.ImageField(_("Opening image"), upload_to='images', blank=True, null=True)
class Meta:
verbose_name = _("Category node")
verbose_name_plural = _("Category nodes")
class TextNode(BaseTreeNode):
extra_text = models.TextField(_('Extra text'))
can_have_children = False
class Meta:
verbose_name = _("Text node")
verbose_name_plural = _("Text nodes")
class ImageNode(BaseTreeNode):
image = models.ImageField(_("Image"), upload_to='images')
class Meta:
verbose_name = _("Image node")
verbose_name_plural = _("Image nodes")
| true | true |
f71eca38d34b1b06054a870b9abde3fef819b045 | 1,660 | py | Python | code/config.py | sahaisaumya/informal_fallacies | 630d56edb27fb0d5f32dd125ad6e886e4ca2955e | [
"MIT"
] | 6 | 2021-08-02T14:37:46.000Z | 2022-02-11T14:09:04.000Z | code/config.py | sahaisaumya/informal_fallacies | 630d56edb27fb0d5f32dd125ad6e886e4ca2955e | [
"MIT"
] | null | null | null | code/config.py | sahaisaumya/informal_fallacies | 630d56edb27fb0d5f32dd125ad6e886e4ca2955e | [
"MIT"
] | 1 | 2021-12-17T16:02:11.000Z | 2021-12-17T16:02:11.000Z | import transformers
import argparse
def none_or_str(value):
    """argparse type helper: map the literal string 'None' to None,
    pass every other string through unchanged."""
    return None if value == 'None' else value
def primary_parse():
    """Construct the command-line parser for training/evaluation runs."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--level')  # granularity: "token" or "comment"
    arg_parser.add_argument('--max_len', type=int, default=256)
    arg_parser.add_argument('--max_len_context', type=int, default=64)
    arg_parser.add_argument('--context', type=none_or_str)  # "parent", "title", or None
    arg_parser.add_argument('--train_batch_size', type=int, default=8)
    arg_parser.add_argument('--valid_batch_size', type=int, default=16)
    arg_parser.add_argument('--test_batch_size', type=int, default=16)
    arg_parser.add_argument('--epochs', type=int, default=10)
    arg_parser.add_argument('--train_flag', type=int, default=1)  # 0 -> evaluate on test data only
    arg_parser.add_argument('--seed', type=int, default=100)
    arg_parser.add_argument('--base_model', default='bert-base-uncased')
    arg_parser.add_argument('--model')  # "bertModel" or "mgnModel"
    arg_parser.add_argument('--folder')  # path to folder with data splits
    arg_parser.add_argument('--classes')  # "multi" or "binary"
    arg_parser.add_argument('--alpha', type=float, default=0.5)
    return arg_parser
def secondary_parse(args):
    """Derive data-split paths, the tokenizer, and the checkpoint filename
    from the already-parsed primary arguments."""
    data_root = f"../data/{args.folder}"
    return {
        'training_file': f"{data_root}/train.txt",
        'valid_file': f"{data_root}/dev.txt",
        'test_file': f"{data_root}/test.txt",
        # local_files_only: never hit the network for the tokenizer files
        'tokenizer': transformers.BertTokenizer.from_pretrained(
            args.base_model, do_lower_case=True, local_files_only=True),
        'model_path': f"{args.level}_{args.base_model}_{args.epochs}_{args.model}"
                      f"_{args.folder}_{args.classes}_{args.context}_{args.seed}.bin",
    }
| 43.684211 | 146 | 0.73494 | import transformers
import argparse
def none_or_str(value):
if value == 'None':
return None
return value
def primary_parse():
parser = argparse.ArgumentParser()
parser.add_argument('--level')
parser.add_argument('--max_len', type=int, default=256)
parser.add_argument('--max_len_context', type=int, default=64)
parser.add_argument('--context', type=none_or_str)
parser.add_argument('--train_batch_size', type=int, default=8)
parser.add_argument('--valid_batch_size', type=int, default=16)
parser.add_argument('--test_batch_size', type=int, default=16)
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--train_flag', type=int, default=1)
parser.add_argument('--seed', type=int, default=100)
parser.add_argument('--base_model', default='bert-base-uncased')
parser.add_argument('--model') dd_argument('--folder')
parser.add_argument('--classes')
parser.add_argument('--alpha', type=float, default=0.5)
return parser
def secondary_parse(args):
sec_args = {}
sec_args['training_file'] = f"../data/{args.folder}/train.txt"
sec_args['valid_file'] = f"../data/{args.folder}/dev.txt"
sec_args['test_file'] = f"../data/{args.folder}/test.txt"
sec_args['tokenizer'] = transformers.BertTokenizer.from_pretrained( args.base_model, do_lower_case=True, local_files_only=True)
sec_args['model_path'] = f"{args.level}_{args.base_model}_{args.epochs}_{args.model}_{args.folder}_{args.classes}_{args.context}_{args.seed}.bin"
return sec_args
| true | true |
f71eca6b3f158fdbe0a1271729c857c9ffafb3c5 | 4,565 | py | Python | ValveAnnulusAnalysis/HeartValveLib/helpers.py | SlicerHeart/SlicerHeart | 5ead8d723f6dec67ea6065b847cb4f8dce5bef72 | [
"BSD-3-Clause"
] | 48 | 2016-04-13T10:22:53.000Z | 2022-03-21T16:31:41.000Z | ValveAnnulusAnalysis/HeartValveLib/helpers.py | SlicerHeart/SlicerHeart | 5ead8d723f6dec67ea6065b847cb4f8dce5bef72 | [
"BSD-3-Clause"
] | 14 | 2018-10-25T21:15:20.000Z | 2021-11-26T16:55:55.000Z | ValveAnnulusAnalysis/HeartValveLib/helpers.py | SlicerHeart/SlicerHeart | 5ead8d723f6dec67ea6065b847cb4f8dce5bef72 | [
"BSD-3-Clause"
] | 21 | 2017-09-12T08:20:36.000Z | 2021-10-30T02:22:11.000Z | """ collection of functions that are useful for several classes but non-specific to any """
import slicer
import logging
def getBinaryLabelmapRepresentation(segmentationNode, segmentID: str):
segmentLabelmap = slicer.vtkOrientedImageData()
segmentationNode.GetBinaryLabelmapRepresentation(segmentID, segmentLabelmap)
return segmentLabelmap
def getSpecificHeartValveModelNodes(phases: list):
heartValveModelNodes = []
for phase in phases:
try:
heartValveModelNodes.extend(list(getValveModelNodesMatchingPhase(phase)))
except ValueError as exc:
logging.warning(exc)
return heartValveModelNodes
def getSpecificHeartValveModelNodesMatchingPhaseAndType(phases: list, valveType: str, sort:bool=True):
valveModels = []
for valveModel in getAllHeartValveModelNodes():
if valveModel.getValveType() == valveType and getValvePhaseShortName(valveModel) in phases:
valveModels.append(valveModel)
if sort:
return sorted(valveModels, key=lambda valveModel: phases.index(getValvePhaseShortName(valveModel)))
return valveModels
def getSpecificHeartValveMeasurementNodes(identifier):
valveQuantificationLogic = slicer.modules.valvequantification.widgetRepresentation().self().logic
validMeasurementNodes = []
for measurementNode in getAllHeartValveMeasurementNodes():
measurementPreset = valveQuantificationLogic.getMeasurementPresetByMeasurementNode(measurementNode)
if not measurementPreset or measurementPreset.QUANTIFICATION_RESULTS_IDENTIFIER != identifier:
continue
validMeasurementNodes.append(measurementNode)
return validMeasurementNodes
def getFirstValveModelNodeMatchingPhase(phase='MS'):
for valveModelNode in getAllHeartValveModelNodes():
if getValvePhaseShortName(valveModelNode) == phase:
return valveModelNode
raise ValueError("Could not find valve for phase %s" % phase)
def getValveModelNodesMatchingPhase(phase):
for valveModelNode in getAllHeartValveModelNodes():
if getValvePhaseShortName(valveModelNode) == phase:
yield valveModelNode
def getFirstValveModelNodeMatchingPhaseAndType(phase, valveType):
for valveModel in getValveModelNodesMatchingPhase(phase):
if valveModel.getValveType() == valveType:
return valveModel
raise ValueError(f"Could not find valve with type {valveType} for phase {phase}")
def getValveModelNodesMatchingPhaseAndType(phase, valveType):
valveModels = []
for valveModel in getValveModelNodesMatchingPhase(phase):
if valveModel.getValveType() == valveType:
valveModels.append(valveModel)
return valveModels
def getAllHeartValveModelNodes():
import HeartValves
return map(HeartValves.getValveModel, getAllHeartValveNodes())
def getAllHeartValveNodes():
return getAllModuleSpecificScriptableNodes('HeartValve')
def getAllHeartValveMeasurementNodes():
return getAllModuleSpecificScriptableNodes('HeartValveMeasurement')
def getAllModuleSpecificScriptableNodes(moduleName):
return filter(lambda node: node.GetAttribute('ModuleName') == moduleName,
slicer.util.getNodesByClass('vtkMRMLScriptedModuleNode'))
def getHeartValveMeasurementNode(phase):
for measurementNode in getAllHeartValveMeasurementNodes():
cardiacCyclePhaseNames = getMeasurementCardiacCyclePhaseShortNames(measurementNode)
if len(cardiacCyclePhaseNames) == 1 and cardiacCyclePhaseNames[0] == phase:
return measurementNode
def getMeasurementCardiacCyclePhaseShortNames(measurementNode):
import ValveQuantification
valveQuantificationLogic = ValveQuantification.ValveQuantificationLogic()
return valveQuantificationLogic.getMeasurementCardiacCyclePhaseShortNames(measurementNode)
def getAllFilesWithExtension(directory, extension, file_name_only=False):
import os
import fnmatch
files = []
for root, dirnames, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, '*{}'.format(extension)):
files.append(filename if file_name_only else os.path.join(root, filename))
return files
def isMRBFile(mrb_file):
import os
return os.path.isfile(mrb_file) and mrb_file.lower().endswith(".mrb")
def getValveModelForSegmentationNode(segmentationNode):
for valveModel in getAllHeartValveModelNodes():
if valveModel.getLeafletSegmentationNode() is segmentationNode:
return valveModel
return None
def getValvePhaseShortName(valveModel):
cardiacPhase = valveModel.getCardiacCyclePhase()
cardiacCyclePhasePreset = valveModel.cardiacCyclePhasePresets[cardiacPhase]
return cardiacCyclePhasePreset['shortname'] | 35.664063 | 103 | 0.806134 |
import slicer
import logging
def getBinaryLabelmapRepresentation(segmentationNode, segmentID: str):
segmentLabelmap = slicer.vtkOrientedImageData()
segmentationNode.GetBinaryLabelmapRepresentation(segmentID, segmentLabelmap)
return segmentLabelmap
def getSpecificHeartValveModelNodes(phases: list):
heartValveModelNodes = []
for phase in phases:
try:
heartValveModelNodes.extend(list(getValveModelNodesMatchingPhase(phase)))
except ValueError as exc:
logging.warning(exc)
return heartValveModelNodes
def getSpecificHeartValveModelNodesMatchingPhaseAndType(phases: list, valveType: str, sort:bool=True):
valveModels = []
for valveModel in getAllHeartValveModelNodes():
if valveModel.getValveType() == valveType and getValvePhaseShortName(valveModel) in phases:
valveModels.append(valveModel)
if sort:
return sorted(valveModels, key=lambda valveModel: phases.index(getValvePhaseShortName(valveModel)))
return valveModels
def getSpecificHeartValveMeasurementNodes(identifier):
valveQuantificationLogic = slicer.modules.valvequantification.widgetRepresentation().self().logic
validMeasurementNodes = []
for measurementNode in getAllHeartValveMeasurementNodes():
measurementPreset = valveQuantificationLogic.getMeasurementPresetByMeasurementNode(measurementNode)
if not measurementPreset or measurementPreset.QUANTIFICATION_RESULTS_IDENTIFIER != identifier:
continue
validMeasurementNodes.append(measurementNode)
return validMeasurementNodes
def getFirstValveModelNodeMatchingPhase(phase='MS'):
for valveModelNode in getAllHeartValveModelNodes():
if getValvePhaseShortName(valveModelNode) == phase:
return valveModelNode
raise ValueError("Could not find valve for phase %s" % phase)
def getValveModelNodesMatchingPhase(phase):
for valveModelNode in getAllHeartValveModelNodes():
if getValvePhaseShortName(valveModelNode) == phase:
yield valveModelNode
def getFirstValveModelNodeMatchingPhaseAndType(phase, valveType):
for valveModel in getValveModelNodesMatchingPhase(phase):
if valveModel.getValveType() == valveType:
return valveModel
raise ValueError(f"Could not find valve with type {valveType} for phase {phase}")
def getValveModelNodesMatchingPhaseAndType(phase, valveType):
valveModels = []
for valveModel in getValveModelNodesMatchingPhase(phase):
if valveModel.getValveType() == valveType:
valveModels.append(valveModel)
return valveModels
def getAllHeartValveModelNodes():
import HeartValves
return map(HeartValves.getValveModel, getAllHeartValveNodes())
def getAllHeartValveNodes():
return getAllModuleSpecificScriptableNodes('HeartValve')
def getAllHeartValveMeasurementNodes():
return getAllModuleSpecificScriptableNodes('HeartValveMeasurement')
def getAllModuleSpecificScriptableNodes(moduleName):
return filter(lambda node: node.GetAttribute('ModuleName') == moduleName,
slicer.util.getNodesByClass('vtkMRMLScriptedModuleNode'))
def getHeartValveMeasurementNode(phase):
for measurementNode in getAllHeartValveMeasurementNodes():
cardiacCyclePhaseNames = getMeasurementCardiacCyclePhaseShortNames(measurementNode)
if len(cardiacCyclePhaseNames) == 1 and cardiacCyclePhaseNames[0] == phase:
return measurementNode
def getMeasurementCardiacCyclePhaseShortNames(measurementNode):
import ValveQuantification
valveQuantificationLogic = ValveQuantification.ValveQuantificationLogic()
return valveQuantificationLogic.getMeasurementCardiacCyclePhaseShortNames(measurementNode)
def getAllFilesWithExtension(directory, extension, file_name_only=False):
import os
import fnmatch
files = []
for root, dirnames, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, '*{}'.format(extension)):
files.append(filename if file_name_only else os.path.join(root, filename))
return files
def isMRBFile(mrb_file):
import os
return os.path.isfile(mrb_file) and mrb_file.lower().endswith(".mrb")
def getValveModelForSegmentationNode(segmentationNode):
for valveModel in getAllHeartValveModelNodes():
if valveModel.getLeafletSegmentationNode() is segmentationNode:
return valveModel
return None
def getValvePhaseShortName(valveModel):
cardiacPhase = valveModel.getCardiacCyclePhase()
cardiacCyclePhasePreset = valveModel.cardiacCyclePhasePresets[cardiacPhase]
return cardiacCyclePhasePreset['shortname'] | true | true |
f71ecd5b355fa5a6dd0a436c34d7ff7e754d15d2 | 1,464 | py | Python | tests/test_response.py | Vlsarro/pystexchapi | 27618002165fc536798c46c486e78caeb85905bf | [
"MIT"
] | null | null | null | tests/test_response.py | Vlsarro/pystexchapi | 27618002165fc536798c46c486e78caeb85905bf | [
"MIT"
] | null | null | null | tests/test_response.py | Vlsarro/pystexchapi | 27618002165fc536798c46c486e78caeb85905bf | [
"MIT"
] | null | null | null | import unittest
import json
import requests
from pystexchapi.response import StockExchangeResponseParser, APIResponse
from pystexchapi.exc import APIDataException, APIResponseParsingException
from tests import TICKER_RESPONSE, GENERIC_ERROR_RESPONSE
def raise_value_error():
raise ValueError()
class TestStockExchangeResponseParser(unittest.TestCase):
@staticmethod
def _make_response(content='', status_code=200) -> requests.Response:
response = requests.Response()
_content = content
response._content = _content
response.status_code = status_code
response.encoding = 'utf-8'
response.json = lambda: json.loads(_content)
return response
def test_parse(self):
resp = StockExchangeResponseParser.parse(self._make_response(content=TICKER_RESPONSE))
self.assertTrue(resp)
self.assertIsInstance(resp, APIResponse)
data = resp.data
self.assertIsInstance(data, list)
self.assertEqual(len(data), 1)
def test_raise_on_error(self):
response = self._make_response(content=GENERIC_ERROR_RESPONSE)
with self.assertRaises(APIDataException) as cm:
StockExchangeResponseParser.parse(response)
self.assertEqual(cm.exception.msg, 'Invalid request')
response.json = raise_value_error
with self.assertRaises(APIResponseParsingException):
StockExchangeResponseParser.parse(response)
| 31.826087 | 94 | 0.729508 | import unittest
import json
import requests
from pystexchapi.response import StockExchangeResponseParser, APIResponse
from pystexchapi.exc import APIDataException, APIResponseParsingException
from tests import TICKER_RESPONSE, GENERIC_ERROR_RESPONSE
def raise_value_error():
raise ValueError()
class TestStockExchangeResponseParser(unittest.TestCase):
@staticmethod
def _make_response(content='', status_code=200) -> requests.Response:
response = requests.Response()
_content = content
response._content = _content
response.status_code = status_code
response.encoding = 'utf-8'
response.json = lambda: json.loads(_content)
return response
def test_parse(self):
resp = StockExchangeResponseParser.parse(self._make_response(content=TICKER_RESPONSE))
self.assertTrue(resp)
self.assertIsInstance(resp, APIResponse)
data = resp.data
self.assertIsInstance(data, list)
self.assertEqual(len(data), 1)
def test_raise_on_error(self):
response = self._make_response(content=GENERIC_ERROR_RESPONSE)
with self.assertRaises(APIDataException) as cm:
StockExchangeResponseParser.parse(response)
self.assertEqual(cm.exception.msg, 'Invalid request')
response.json = raise_value_error
with self.assertRaises(APIResponseParsingException):
StockExchangeResponseParser.parse(response)
| true | true |
f71ecd6238bcff3815bc4c498aa4bcae208a60d4 | 12,301 | py | Python | read_xml_export.py | DavidJLambert/Two-Windows-Event-Log-Summarizers | 393ce7dc550b9fd52725e233f15e4eff1e9bc47a | [
"MIT"
] | null | null | null | read_xml_export.py | DavidJLambert/Two-Windows-Event-Log-Summarizers | 393ce7dc550b9fd52725e233f15e4eff1e9bc47a | [
"MIT"
] | null | null | null | read_xml_export.py | DavidJLambert/Two-Windows-Event-Log-Summarizers | 393ce7dc550b9fd52725e233f15e4eff1e9bc47a | [
"MIT"
] | null | null | null | """ read_xml_export.py
REPOSITORY:
https://github.com/DavidJLambert/Two-Windows-Event-Log-Summarizers
SUMMARY:
Scans XML exports of the Windows Event Log and reports summary statistics.
AUTHOR:
David J. Lambert
VERSION:
0.1.1
DATE:
July 10, 2020
"""
# -------- IMPORTS.
from __future__ import print_function
import xml.etree.ElementTree
import win32security
from frozendict import frozendict
import glob
from zipfile import ZipFile
import os
# -------- CODE.
def handle_files() -> None:
""" Driver program. Find XML files in current directory.
Args:
none.
Returns:
none.
Raises:
none.
"""
# Read and parse XML file(s). First try to unzip any zipped files.
xml_zip_files = glob.glob('./sample_data/*.xml.zip')
if len(xml_zip_files) > 0:
for xml_zip_file in xml_zip_files:
with ZipFile(xml_zip_file, "r") as f:
unzipped_name = xml_zip_file.replace(".zip", "")
unzipped_exists = os.path.isfile(unzipped_name)
if not unzipped_exists:
f.extractall("./sample_data")
# Read and parse XML file(s).
xml_files = glob.glob('./sample_data/*.xml')
if len(xml_files) == 0:
print("### No XML files to process.")
exit(1)
output_start = "#"*10 + " "*2
last_xml_file = xml_files[-1]
for xml_file in xml_files:
print("\n{}STARTING FILE '{}'.".format(output_start, xml_file[2:]))
tree = xml.etree.ElementTree.parse(xml_file)
events_root = tree.getroot()
analyze_one_file(events_root)
print("\n{}END OF FILE '{}'.".format(output_start, xml_file[2:]))
if xml_file != last_xml_file:
del events_root
del tree
# End of function handle_files.
def analyze_one_file(events_root) -> None:
""" Main analysis. Go thru one file, compile statistics on contents.
Args:
events_root (object): root of the current XML node tree.
Returns:
none.
Raises:
none.
"""
# Get tag_root, the start of the tag for each node in this XML tree.
tag_root = events_root[0].tag.replace("Event", "")
# To count children of level 2 node "event_node" (tag = tag_root+"Event").
count_children = False
if count_children:
count_branch = {"Count": 0}
# Nodes from subtree of level 3 node "sys_root".
sys_nodes = {"EventID", "Version", "Level", "Task", "Opcode", "Keywords",
"Channel", "Computer"}
# Fields of "Provider" node in subtree of level 3 node "sys_root".
provider_fields = {"Name", "Guid", "EventSourceName"}
# Nodes from subtree of level 3 node "render_root".
render_nodes = {"Level", "Task", "Opcode", "Channel", "Provider"}
# Map names in "sys_nodes" and "render_nodes" to Event Viewer field names.
view_name = {"Provider": "Provider", "Channel": "Log Name"}
for node in sys_nodes:
if node != "Channel":
view_name[node] = node
# Map names in "provider_fields" to field names seen in Event Viewer.
view_name["Name"] = "Provider"
view_name["Guid"] = "Guid"
view_name["EventSourceName"] = "Source Name"
# Map names in "security_node" to field names seen in Event Viewer.
view_name["UserID"] = "User Name"
# Event summary. The keys are values of "view_name".
event_summary = dict.fromkeys(view_name.values())
# Where we compile event statistics.
event_stats = {}
# Iterate over all records in the exported XML file.
for event_node in events_root:
# Count children of level 2 node "event_node" (tag = tag_root+"Event").
if count_children:
count_branch["Count"] += 1
for child_node in event_node:
branch = child_node.tag
if branch not in count_branch:
count_branch[branch] = 1
else:
count_branch[branch] += 1
# The level 2 node "event_node" can have children with these tags:
# tag_root+"EventData", tag_root+"RenderingInfo", tag_root+"System",
# and tag_root+"UserData"
# Each event always has a child with tag = tag_root+"System".
# Each event always has a child with tag = tag_root+"EventData"
# or tag = tag_root+"UserData".
# The level 3 node "sys_root", with tag = tag_root+"System".
sys_root = event_node.find(tag_root + "System")
# Get info from child nodes of level 3 node "sys_root".
for node in sys_nodes:
event_summary[view_name[node]] = find_field(sys_root, node,
tag_root)
# Fields of the "Provider" node.
provider_node = sys_root.find(tag_root + "Provider")
for field in provider_fields:
event_summary[view_name[field]] = sanitize(provider_node.get(field))
# Fields of the "Security" node.
security_node = sys_root.find(tag_root + "Security")
event_summary["User Name"] = get_user_name(security_node.get("UserID"))
# Level 3 node "render_root" (tag=tag_root+"RenderingInfo").
render_root = event_node.find(tag_root + "RenderingInfo")
if render_root is not None:
# Get info from child nodes of level 3 node "render_root".
for node in render_nodes:
value = sanitize(find_field(render_root, node, tag_root))
if value != "None":
event_summary[view_name[node]] = value
# Fields of the "Keywords" node.
keywords_node = render_root.find(tag_root + "Keywords")
value = ""
if keywords_node is not None:
for keyword in keywords_node:
text = sanitize(keyword.text)
if text != "None":
if value == "":
value = text
else:
value += " " + text
if value != "":
event_summary["Keywords"] = value
# Translating int to str not done in "render_root", or no "render_root".
event_summary["Opcode"] = opcode_name(event_summary["Opcode"])
event_summary["Level"] = level_name(event_summary["Level"])
event_summary["Keywords"] = keywords_name(event_summary["Keywords"])
# print(event_summary)
if frozendict(event_summary) in event_stats.keys():
event_stats[frozendict(event_summary)] += 1
else:
event_stats[frozendict(event_summary)] = 1
# The count of the children of level 2 node "event_node".
if count_children:
print(count_branch)
# Print event stats
for event_summary, count in sorted(event_stats.items(), reverse=True,
key=lambda item: item[1]):
print("\n## {} occurrences of this event:".format(count))
for key, value in event_summary.items():
print(key + ": " + value)
# End of function analyze_one_file.
def find_field(child, field_name: str, tag_root: str) -> str:
""" Fetch specific fields of the child nodes of current the XML node.
Args:
child (object): child of node that may have field = field_name.
field_name (str): name of field of XML node.
tag_root (str): start of tag of each XML node in tree.
Returns:
text (str): text of the field with "field_name".
Raises:
none.
"""
field = child.find(tag_root + field_name)
if field is None:
return ""
else:
return sanitize(field.text)
# End of function find_field.
def sanitize(this) -> str:
""" Convert object to string.
Args:
this (object).
Returns:
str(this) (str)
Raises:
none.
"""
if this is None:
return "None"
else:
return str(this)
# End of function sanitize.
def get_user_name(sid) -> str:
""" Translate from User SID to User Name.
Args:
PySID (object): contains a user's SID
(See http://timgolden.me.uk/pywin32-docs/win32security.html).
Returns:
username (str): Windows user name with argument's SID.
Raises:
none.
"""
if sid is None:
return "None"
else:
py_sid = win32security.GetBinarySid(sid)
return win32security.LookupAccountSid(None, py_sid)[0]
# End of function get_user_name.
def level_name(level: str) -> str:
""" Translate 'Level' Event Log field from int to descriptive string.
Args:
level (str(int)): severity level of event.
Returns:
severity (str).
Raises:
none.
"""
name = {"0": "Information",
"1": "Critical",
"2": "Error",
"3": "Warning",
"4": "Information",
"5": "Verbose"}
if level in name.keys():
return name[level]
else:
return sanitize(level)
# End of function level_name.
def opcode_name(opcode: str) -> str:
""" Translate 'Opcode' Event Log field from int to descriptive string.
Args:
Opcode (str(int)): event operation code.
Returns:
operation description (string).
Raises:
none.
"""
""" Obtained by correlating values of 'Opcode' in the System and
RenderingInfo subtrees.
Made sure each value in System subtree always associated with same value
in RenderingInfo subtree (not true of 'Task' field!).
Sometimes two values in System subtree have same string in RenderingInfo
subtree, these repetitions are not typos.
"""
name = {"": "Info", "0": "Info",
"1": "Start", "2": "Stop",
"12": "Download", "13": "Installation",
"62": "ServiceStart", "63": "ServiceStop",
"68": "ServiceStart", "69": "ServiceStop",
"104": "ServiceStopWithRefCount", "129": "ServiceShutdown"}
if opcode in name.keys():
return name[opcode]
else:
return sanitize(opcode)
# End of function opcode_name.
def keywords_name(keywords: str) -> str:
""" Translate 'Keywords' Event Log field from hex to descriptive string.
Args:
keywords (str): hexidecimal string.
Returns:
keywords_name (str): keyword(s) corresponding to hexidecimal arg.
Raises:
none.
"""
""" Obtained by correlating values of 'Keywords' field in the System subtree
with the 'Keywords' subtree in the RenderingInfo subtrees
Made sure each value in System subtree always associated with same value
in RenderingInfo subtree (not true of 'Task' field!).
Sometimes two values in System subtree have same string in RenderingInfo
subtree, repetitions are not typos.
"""
name = {"0x80000000000000": "Classic",
"0x4000400000000001": "Core Events",
"0x4000400000000002": "Helper Class Events",
"0x8000000000000010": "Time",
"0x8000000000000018": "Installation Success",
"0x8000000000000028": "Installation Failure",
"0x8000000000002004": "Download Started",
"0x8000000000002008": "Installation Started",
"0x8001000000000001": "Performance, Response Time",
"0x8080000000000000": "Classic"}
if keywords in name.keys():
return name[keywords]
else:
return sanitize(keywords)
# End of function keywords_name.
def flatten(node, tag_root) -> None:
""" Flattens subtree of a node.
Args:
node (object): XML tree subtree.
tag_root: ?
Returns:
none.
Raises:
none.
"""
""" Demo of flattening the subtree of the given node. Alternative method
of walking node tree. Elegant, but not as efficient.
"""
for child in node.iter():
tag = child.tag.replace(tag_root, "").strip()
child_text = child.text
if child_text is not None:
print(tag + ": " + child_text.strip())
if len(child.attrib):
for key, value in child.attrib.items():
print(tag + "-" + key.strip() + ": " + value.strip())
# End of function flatten.
if __name__ == '__main__':
handle_files()
| 33.156334 | 80 | 0.598 |
from __future__ import print_function
import xml.etree.ElementTree
import win32security
from frozendict import frozendict
import glob
from zipfile import ZipFile
import os
def handle_files() -> None:
xml_zip_files = glob.glob('./sample_data/*.xml.zip')
if len(xml_zip_files) > 0:
for xml_zip_file in xml_zip_files:
with ZipFile(xml_zip_file, "r") as f:
unzipped_name = xml_zip_file.replace(".zip", "")
unzipped_exists = os.path.isfile(unzipped_name)
if not unzipped_exists:
f.extractall("./sample_data")
xml_files = glob.glob('./sample_data/*.xml')
if len(xml_files) == 0:
print("### No XML files to process.")
exit(1)
output_start = "#"*10 + " "*2
last_xml_file = xml_files[-1]
for xml_file in xml_files:
print("\n{}STARTING FILE '{}'.".format(output_start, xml_file[2:]))
tree = xml.etree.ElementTree.parse(xml_file)
events_root = tree.getroot()
analyze_one_file(events_root)
print("\n{}END OF FILE '{}'.".format(output_start, xml_file[2:]))
if xml_file != last_xml_file:
del events_root
del tree
def analyze_one_file(events_root) -> None:
tag_root = events_root[0].tag.replace("Event", "")
count_children = False
if count_children:
count_branch = {"Count": 0}
sys_nodes = {"EventID", "Version", "Level", "Task", "Opcode", "Keywords",
"Channel", "Computer"}
provider_fields = {"Name", "Guid", "EventSourceName"}
render_nodes = {"Level", "Task", "Opcode", "Channel", "Provider"}
view_name = {"Provider": "Provider", "Channel": "Log Name"}
for node in sys_nodes:
if node != "Channel":
view_name[node] = node
view_name["Name"] = "Provider"
view_name["Guid"] = "Guid"
view_name["EventSourceName"] = "Source Name"
view_name["UserID"] = "User Name"
event_summary = dict.fromkeys(view_name.values())
event_stats = {}
for event_node in events_root:
if count_children:
count_branch["Count"] += 1
for child_node in event_node:
branch = child_node.tag
if branch not in count_branch:
count_branch[branch] = 1
else:
count_branch[branch] += 1
sys_root = event_node.find(tag_root + "System")
for node in sys_nodes:
event_summary[view_name[node]] = find_field(sys_root, node,
tag_root)
provider_node = sys_root.find(tag_root + "Provider")
for field in provider_fields:
event_summary[view_name[field]] = sanitize(provider_node.get(field))
security_node = sys_root.find(tag_root + "Security")
event_summary["User Name"] = get_user_name(security_node.get("UserID"))
render_root = event_node.find(tag_root + "RenderingInfo")
if render_root is not None:
for node in render_nodes:
value = sanitize(find_field(render_root, node, tag_root))
if value != "None":
event_summary[view_name[node]] = value
keywords_node = render_root.find(tag_root + "Keywords")
value = ""
if keywords_node is not None:
for keyword in keywords_node:
text = sanitize(keyword.text)
if text != "None":
if value == "":
value = text
else:
value += " " + text
if value != "":
event_summary["Keywords"] = value
event_summary["Opcode"] = opcode_name(event_summary["Opcode"])
event_summary["Level"] = level_name(event_summary["Level"])
event_summary["Keywords"] = keywords_name(event_summary["Keywords"])
if frozendict(event_summary) in event_stats.keys():
event_stats[frozendict(event_summary)] += 1
else:
event_stats[frozendict(event_summary)] = 1
if count_children:
print(count_branch)
for event_summary, count in sorted(event_stats.items(), reverse=True,
key=lambda item: item[1]):
print("\n## {} occurrences of this event:".format(count))
for key, value in event_summary.items():
print(key + ": " + value)
def find_field(child, field_name: str, tag_root: str) -> str:
field = child.find(tag_root + field_name)
if field is None:
return ""
else:
return sanitize(field.text)
def sanitize(this) -> str:
if this is None:
return "None"
else:
return str(this)
def get_user_name(sid) -> str:
if sid is None:
return "None"
else:
py_sid = win32security.GetBinarySid(sid)
return win32security.LookupAccountSid(None, py_sid)[0]
def level_name(level: str) -> str:
name = {"0": "Information",
"1": "Critical",
"2": "Error",
"3": "Warning",
"4": "Information",
"5": "Verbose"}
if level in name.keys():
return name[level]
else:
return sanitize(level)
def opcode_name(opcode: str) -> str:
name = {"": "Info", "0": "Info",
"1": "Start", "2": "Stop",
"12": "Download", "13": "Installation",
"62": "ServiceStart", "63": "ServiceStop",
"68": "ServiceStart", "69": "ServiceStop",
"104": "ServiceStopWithRefCount", "129": "ServiceShutdown"}
if opcode in name.keys():
return name[opcode]
else:
return sanitize(opcode)
def keywords_name(keywords: str) -> str:
name = {"0x80000000000000": "Classic",
"0x4000400000000001": "Core Events",
"0x4000400000000002": "Helper Class Events",
"0x8000000000000010": "Time",
"0x8000000000000018": "Installation Success",
"0x8000000000000028": "Installation Failure",
"0x8000000000002004": "Download Started",
"0x8000000000002008": "Installation Started",
"0x8001000000000001": "Performance, Response Time",
"0x8080000000000000": "Classic"}
if keywords in name.keys():
return name[keywords]
else:
return sanitize(keywords)
def flatten(node, tag_root) -> None:
for child in node.iter():
tag = child.tag.replace(tag_root, "").strip()
child_text = child.text
if child_text is not None:
print(tag + ": " + child_text.strip())
if len(child.attrib):
for key, value in child.attrib.items():
print(tag + "-" + key.strip() + ": " + value.strip())
if __name__ == '__main__':
handle_files()
| true | true |
f71ece1554e7f63c5e58bcf712cb98e72ca6840c | 349 | py | Python | assets/csv_counter.py | romanbird/jeopardy-bot | d47600d9261fefcb5f08d699ddf8b5fdcd072da1 | [
"MIT"
] | 2 | 2019-02-09T15:33:39.000Z | 2019-02-10T11:43:19.000Z | assets/csv_counter.py | romanbird/jeopardy-bot | d47600d9261fefcb5f08d699ddf8b5fdcd072da1 | [
"MIT"
] | null | null | null | assets/csv_counter.py | romanbird/jeopardy-bot | d47600d9261fefcb5f08d699ddf8b5fdcd072da1 | [
"MIT"
] | null | null | null | import csv
import pandas as pd
from tqdm import tqdm
from collections import Counter
dbRead = open('db.csv', "r", newline='', encoding='utf8')
db = list(csv.reader(dbRead, delimiter=","))
column = [row[-1] for row in db]
for row in tqdm(db):
row[-2]=Counter(column)[row[-1]]
df=pd.DataFrame(data=db)
df.to_csv('db.csv', sep=",", encoding='utf8') | 31.727273 | 57 | 0.681948 | import csv
import pandas as pd
from tqdm import tqdm
from collections import Counter
dbRead = open('db.csv', "r", newline='', encoding='utf8')
db = list(csv.reader(dbRead, delimiter=","))
column = [row[-1] for row in db]
for row in tqdm(db):
row[-2]=Counter(column)[row[-1]]
df=pd.DataFrame(data=db)
df.to_csv('db.csv', sep=",", encoding='utf8') | true | true |
f71ecec53aa3a76357554679243d45b4d4803a19 | 188,462 | py | Python | python/idc.py | 0xeb/src | 01d069ae681856ecc9c7a96067f60dc86a33f0ab | [
"BSD-3-Clause"
] | null | null | null | python/idc.py | 0xeb/src | 01d069ae681856ecc9c7a96067f60dc86a33f0ab | [
"BSD-3-Clause"
] | null | null | null | python/idc.py | 0xeb/src | 01d069ae681856ecc9c7a96067f60dc86a33f0ab | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#---------------------------------------------------------------------
# IDAPython - Python plugin for Interactive Disassembler
#
# Original IDC.IDC:
# Copyright (c) 1990-2010 Ilfak Guilfanov
#
# Python conversion:
# Copyright (c) 2004-2010 Gergely Erdelyi <gergely.erdelyi@d-dome.net>
#
# All rights reserved.
#
# For detailed copyright information see the file COPYING in
# the root of the distribution archive.
#---------------------------------------------------------------------
# idc.py - IDC compatibility module
#---------------------------------------------------------------------
"""
IDC compatibility module
This file contains IDA built-in function declarations and internal bit
definitions. Each byte of the program has 32-bit flags (low 8 bits keep
the byte value). These 32 bits are used in get_full_flags/get_flags functions.
This file is subject to change without any notice.
Future versions of IDA may use other definitions.
"""
from __future__ import print_function
# FIXME: Perhaps those should be loaded on-demand
import ida_idaapi
import ida_auto
import ida_dbg
import ida_diskio
import ida_entry
import ida_enum
import ida_expr
import ida_fixup
import ida_frame
import ida_funcs
import ida_gdl
import ida_ida
import ida_idc
import ida_bytes
import ida_idd
import ida_idp
import ida_kernwin
import ida_lines
import ida_loader
import ida_moves
import ida_nalt
import ida_name
import ida_netnode
import ida_offset
import ida_pro
import ida_search
import ida_segment
import ida_segregs
import ida_struct
import ida_typeinf
import ida_ua
import ida_xref
import _ida_idaapi
import os
import re
import struct
import time
import types
import sys
# True when BADADDR is the 64-bit all-ones value, i.e. this is a 64-bit
# (ea64) IDA kernel where effective addresses are 64 bits wide.
__EA64__ = ida_idaapi.BADADDR == 0xFFFFFFFFFFFFFFFF
# Mask covering one full effective address (all bits set for the current
# address width).
WORDMASK = 0xFFFFFFFFFFFFFFFF if __EA64__ else 0xFFFFFFFF # just there for bw-compat purposes; please don't use
class DeprecatedIDCError(Exception):
    """
    Raised when a removed/deprecated IDC compatibility function is invoked.
    """
    pass
__warned_deprecated_proto_confusion = {}
def __warn_once_deprecated_proto_confusion(what, alternative):
    # Report each deprecated name at most once per session.
    if what in __warned_deprecated_proto_confusion:
        return
    print("NOTE: idc.%s is deprecated due to signature confusion with %s. Please use %s instead" % (
        what,
        alternative,
        alternative))
    __warned_deprecated_proto_confusion[what] = True
def _IDC_GetAttr(obj, attrmap, attroffs):
    """
    Generic helper to read an object attribute through an offset->name map.
    Do not use unless you know what you are doing.

    @param obj: object to read from
    @param attrmap: dict mapping attribute offsets to (read_only, name) pairs
    @param attroffs: offset key to look up in attrmap
    @raise KeyError: if the offset is unknown or the attribute is missing
    """
    entry = attrmap.get(attroffs)
    if entry is not None and hasattr(obj, entry[1]):
        return getattr(obj, entry[1])
    errormsg = "attribute with offset %d not found, check the offset and report the problem" % attroffs
    raise KeyError(errormsg)
def _IDC_SetAttr(obj, attrmap, attroffs, value):
    """
    Generic helper to write an object attribute through an offset->name map.
    Do not use unless you know what you are doing.

    @param obj: object to modify
    @param attrmap: dict mapping attribute offsets to (read_only, name) pairs
    @param attroffs: offset key to look up in attrmap
    @param value: new attribute value
    @raise KeyError: if the attribute is read-only, unknown, or missing
    """
    entry = attrmap.get(attroffs)
    if entry is not None:
        # refuse to overwrite attributes flagged as read-only
        if entry[0]:
            raise KeyError("attribute with offset %d is read-only" % attroffs)
        if hasattr(obj, entry[1]):
            return setattr(obj, entry[1], value)
    errormsg = "attribute with offset %d not found, check the offset and report the problem" % attroffs
    raise KeyError(errormsg)
BADADDR = ida_idaapi.BADADDR # Not allowed address value
BADSEL = ida_idaapi.BADSEL # Not allowed selector value/number
SIZE_MAX = _ida_idaapi.SIZE_MAX
ida_ida.__set_module_dynattrs(
__name__,
{
"MAXADDR" : (lambda: ida_ida.inf_get_privrange_start_ea(), None),
})
#
# Flag bit definitions (for get_full_flags())
#
MS_VAL = ida_bytes.MS_VAL # Mask for byte value
FF_IVL = ida_bytes.FF_IVL # Byte has value ?
# Do flags contain byte value? (i.e. has the byte a value?)
# if not, the byte is uninitialized.
# F is a flags value as returned by get_full_flags()/get_flags().
def has_value(F): return ((F & FF_IVL) != 0) # any defined value?
def byte_value(F):
    """
    Get byte value from flags
    Get value of byte provided that the byte is initialized.
    This macro works ok only for 8-bit byte machines.
    """
    return (F & MS_VAL)
def is_loaded(ea):
    """Is the byte initialized?"""
    return has_value(get_full_flags(ea)) # any defined value?
MS_CLS = ida_bytes.MS_CLS # Mask for typing
FF_CODE = ida_bytes.FF_CODE # Code ?
FF_DATA = ida_bytes.FF_DATA # Data ?
FF_TAIL = ida_bytes.FF_TAIL # Tail ?
FF_UNK = ida_bytes.FF_UNK # Unknown ?
# Byte classification predicates; F is a flags value (MS_CLS selects the class bits).
def is_code(F): return ((F & MS_CLS) == FF_CODE) # is code byte?
def is_data(F): return ((F & MS_CLS) == FF_DATA) # is data byte?
def is_tail(F): return ((F & MS_CLS) == FF_TAIL) # is tail byte?
def is_unknown(F): return ((F & MS_CLS) == FF_UNK) # is unexplored byte?
def is_head(F): return ((F & FF_DATA) != 0) # is start of code/data?
#
# Common bits
#
MS_COMM = ida_bytes.MS_COMM # Mask of common bits
FF_COMM = ida_bytes.FF_COMM # Has comment?
FF_REF = ida_bytes.FF_REF # has references?
FF_LINE = ida_bytes.FF_LINE # Has next or prev cmt lines ?
FF_NAME = ida_bytes.FF_NAME # Has user-defined name ?
FF_LABL = ida_bytes.FF_LABL # Has dummy name?
FF_FLOW = ida_bytes.FF_FLOW # Exec flow from prev instruction?
FF_ANYNAME = FF_LABL | FF_NAME
def is_flow(F): return ((F & FF_FLOW) != 0) # exec flow from prev instruction?
def isExtra(F): return ((F & FF_LINE) != 0) # has anterior/posterior cmt lines?
def isRef(F): return ((F & FF_REF) != 0) # has references?
def hasName(F): return ((F & FF_NAME) != 0) # has user-defined name?
def hasUserName(F): return ((F & FF_ANYNAME) == FF_NAME) # user-defined name and no dummy label?
MS_0TYPE = ida_bytes.MS_0TYPE # Mask for 1st arg typing
FF_0VOID = ida_bytes.FF_0VOID # Void (unknown)?
FF_0NUMH = ida_bytes.FF_0NUMH # Hexadecimal number?
FF_0NUMD = ida_bytes.FF_0NUMD # Decimal number?
FF_0CHAR = ida_bytes.FF_0CHAR # Char ('x')?
FF_0SEG = ida_bytes.FF_0SEG # Segment?
FF_0OFF = ida_bytes.FF_0OFF # Offset?
FF_0NUMB = ida_bytes.FF_0NUMB # Binary number?
FF_0NUMO = ida_bytes.FF_0NUMO # Octal number?
FF_0ENUM = ida_bytes.FF_0ENUM # Enumeration?
FF_0FOP = ida_bytes.FF_0FOP # Forced operand?
FF_0STRO = ida_bytes.FF_0STRO # Struct offset?
FF_0STK = ida_bytes.FF_0STK # Stack variable?
MS_1TYPE = ida_bytes.MS_1TYPE # Mask for 2nd arg typing
FF_1VOID = ida_bytes.FF_1VOID # Void (unknown)?
FF_1NUMH = ida_bytes.FF_1NUMH # Hexadecimal number?
FF_1NUMD = ida_bytes.FF_1NUMD # Decimal number?
FF_1CHAR = ida_bytes.FF_1CHAR # Char ('x')?
FF_1SEG = ida_bytes.FF_1SEG # Segment?
FF_1OFF = ida_bytes.FF_1OFF # Offset?
FF_1NUMB = ida_bytes.FF_1NUMB # Binary number?
FF_1NUMO = ida_bytes.FF_1NUMO # Octal number?
FF_1ENUM = ida_bytes.FF_1ENUM # Enumeration?
FF_1FOP = ida_bytes.FF_1FOP # Forced operand?
FF_1STRO = ida_bytes.FF_1STRO # Struct offset?
FF_1STK = ida_bytes.FF_1STK # Stack variable?
# The following macros answer questions like
# 'is the 1st (or 2nd) operand of instruction or data of the given type'?
# Please note that data items use only the 1st operand type (is...0)
# Operand-type predicates: the ...0 variants test the first operand
# (MS_0TYPE bits), the ...1 variants the second (MS_1TYPE bits).
def is_defarg0(F): return ((F & MS_0TYPE) != FF_0VOID)
def is_defarg1(F): return ((F & MS_1TYPE) != FF_1VOID)
def isDec0(F): return ((F & MS_0TYPE) == FF_0NUMD)
def isDec1(F): return ((F & MS_1TYPE) == FF_1NUMD)
def isHex0(F): return ((F & MS_0TYPE) == FF_0NUMH)
def isHex1(F): return ((F & MS_1TYPE) == FF_1NUMH)
def isOct0(F): return ((F & MS_0TYPE) == FF_0NUMO)
def isOct1(F): return ((F & MS_1TYPE) == FF_1NUMO)
def isBin0(F): return ((F & MS_0TYPE) == FF_0NUMB)
def isBin1(F): return ((F & MS_1TYPE) == FF_1NUMB)
def is_off0(F): return ((F & MS_0TYPE) == FF_0OFF)
def is_off1(F): return ((F & MS_1TYPE) == FF_1OFF)
def is_char0(F): return ((F & MS_0TYPE) == FF_0CHAR)
def is_char1(F): return ((F & MS_1TYPE) == FF_1CHAR)
def is_seg0(F): return ((F & MS_0TYPE) == FF_0SEG)
def is_seg1(F): return ((F & MS_1TYPE) == FF_1SEG)
def is_enum0(F): return ((F & MS_0TYPE) == FF_0ENUM)
def is_enum1(F): return ((F & MS_1TYPE) == FF_1ENUM)
def is_manual0(F): return ((F & MS_0TYPE) == FF_0FOP)
def is_manual1(F): return ((F & MS_1TYPE) == FF_1FOP)
def is_stroff0(F): return ((F & MS_0TYPE) == FF_0STRO)
def is_stroff1(F): return ((F & MS_1TYPE) == FF_1STRO)
def is_stkvar0(F): return ((F & MS_0TYPE) == FF_0STK)
def is_stkvar1(F): return ((F & MS_1TYPE) == FF_1STK)
#
# Bits for DATA bytes
#
DT_TYPE = ida_bytes.DT_TYPE & 0xFFFFFFFF # Mask for DATA typing
FF_BYTE = ida_bytes.FF_BYTE & 0xFFFFFFFF # byte
FF_WORD = ida_bytes.FF_WORD & 0xFFFFFFFF # word
FF_DWORD = ida_bytes.FF_DWORD & 0xFFFFFFFF # dword
FF_QWORD = ida_bytes.FF_QWORD & 0xFFFFFFFF # qword
FF_TBYTE = ida_bytes.FF_TBYTE & 0xFFFFFFFF # tbyte
FF_STRLIT = ida_bytes.FF_STRLIT & 0xFFFFFFFF # ASCII ?
FF_STRUCT = ida_bytes.FF_STRUCT & 0xFFFFFFFF # Struct ?
FF_OWORD = ida_bytes.FF_OWORD & 0xFFFFFFFF # octaword (16 bytes)
FF_FLOAT = ida_bytes.FF_FLOAT & 0xFFFFFFFF # float
FF_DOUBLE = ida_bytes.FF_DOUBLE & 0xFFFFFFFF # double
FF_PACKREAL = ida_bytes.FF_PACKREAL & 0xFFFFFFFF # packed decimal real
FF_ALIGN = ida_bytes.FF_ALIGN & 0xFFFFFFFF # alignment directive
# Data-type predicates: true only for data bytes (is_data) of the given DT_TYPE.
def is_byte(F): return (is_data(F) and (F & DT_TYPE) == FF_BYTE)
def is_word(F): return (is_data(F) and (F & DT_TYPE) == FF_WORD)
def is_dword(F): return (is_data(F) and (F & DT_TYPE) == FF_DWORD)
def is_qword(F): return (is_data(F) and (F & DT_TYPE) == FF_QWORD)
def is_oword(F): return (is_data(F) and (F & DT_TYPE) == FF_OWORD)
def is_tbyte(F): return (is_data(F) and (F & DT_TYPE) == FF_TBYTE)
def is_float(F): return (is_data(F) and (F & DT_TYPE) == FF_FLOAT)
def is_double(F): return (is_data(F) and (F & DT_TYPE) == FF_DOUBLE)
def is_pack_real(F): return (is_data(F) and (F & DT_TYPE) == FF_PACKREAL)
def is_strlit(F): return (is_data(F) and (F & DT_TYPE) == FF_STRLIT)
def is_struct(F): return (is_data(F) and (F & DT_TYPE) == FF_STRUCT)
def is_align(F): return (is_data(F) and (F & DT_TYPE) == FF_ALIGN)
#
# Bits for CODE bytes
#
MS_CODE = ida_bytes.MS_CODE & 0xFFFFFFFF
FF_FUNC = ida_bytes.FF_FUNC & 0xFFFFFFFF # function start?
FF_IMMD = ida_bytes.FF_IMMD & 0xFFFFFFFF # Has Immediate value ?
FF_JUMP = ida_bytes.FF_JUMP & 0xFFFFFFFF # Has jump table
#
# Loader flags
#
if ida_idaapi.uses_swig_builtins:
_scope = ida_loader.loader_t
else:
_scope = ida_loader
NEF_SEGS = _scope.NEF_SEGS # Create segments
NEF_RSCS = _scope.NEF_RSCS # Load resources
NEF_NAME = _scope.NEF_NAME # Rename entries
NEF_MAN = _scope.NEF_MAN # Manual load
NEF_FILL = _scope.NEF_FILL # Fill segment gaps
NEF_IMPS = _scope.NEF_IMPS # Create imports section
NEF_FIRST = _scope.NEF_FIRST # This is the first file loaded
NEF_CODE = _scope.NEF_CODE # for load_binary_file:
NEF_RELOAD = _scope.NEF_RELOAD # reload the file at the same place:
NEF_FLAT = _scope.NEF_FLAT # Autocreated FLAT group (PE)
# List of built-in functions
# --------------------------
#
# The following conventions are used in this list:
# 'ea' is a linear address
# 'success' is 0 if a function failed, 1 otherwise
# 'void' means that function returns no meaningful value (always 0)
#
# All function parameter conversions are made automatically.
#
# ----------------------------------------------------------------------------
# M I S C E L L A N E O U S
# ----------------------------------------------------------------------------
# IDC value-type probes have no Python equivalent; use isinstance() instead.
def value_is_string(var): raise NotImplementedError("this function is not needed in Python")
def value_is_long(var): raise NotImplementedError("this function is not needed in Python")
def value_is_float(var): raise NotImplementedError("this function is not needed in Python")
def value_is_func(var): raise NotImplementedError("this function is not needed in Python")
def value_is_pvoid(var): raise NotImplementedError("this function is not needed in Python")
def value_is_int64(var): raise NotImplementedError("this function is not needed in Python")
def to_ea(seg, off):
    """
    Compute a linear address from a real-mode segment:offset pair.

    @param seg: segment paragraph value
    @param off: offset within the segment
    @return: ((seg << 4) + off)
    """
    base = seg << 4
    return base + off
# Removed IDC string helpers; each now raises DeprecatedIDCError unconditionally.
def form(format, *args):
    raise DeprecatedIDCError("form() is deprecated. Use python string operations instead.")
def substr(s, x1, x2):
    raise DeprecatedIDCError("substr() is deprecated. Use python string operations instead.")
def strstr(s1, s2):
    raise DeprecatedIDCError("strstr() is deprecated. Use python string operations instead.")
def strlen(s):
    raise DeprecatedIDCError("strlen() is deprecated. Use python string operations instead.")
def xtol(s):
    raise DeprecatedIDCError("xtol() is deprecated. Use python long() instead.")
def atoa(ea):
    """
    Format an address the way IDA line prefixes do, e.g. 'seg000:1234'.

    @param ea: address to format
    @return: string representation of the address
    """
    return ida_kernwin.ea2str(ea)
# Removed IDC numeric-conversion helpers; both raise DeprecatedIDCError.
def ltoa(n, radix):
    raise DeprecatedIDCError("ltoa() is deprecated. Use python string operations instead.")
def atol(s):
    raise DeprecatedIDCError("atol() is deprecated. Use python long() instead.")
def rotate_left(value, count, nbits, offset):
    """
    Rotate a bit-field of ``value`` to the left (or right).

    @param value: value to rotate
    @param count: number of positions to rotate; a negative count
                  rotates to the right
    @param nbits: width of the bit-field to rotate
    @param offset: position of the least significant bit of the field
    @return: ``value`` with the selected field rotated; all bits
             outside the field are left untouched
    """
    assert offset >= 0, "offset must be >= 0"
    assert nbits > 0, "nbits must be > 0"
    field_mask = (1 << (offset + nbits)) - (1 << offset)
    field = (value & field_mask) >> offset
    # Python's modulo maps negative (rightward) counts onto the
    # equivalent left rotation, so one shift pair handles both cases.
    k = count % nbits
    rotated = ((field << k) | (field >> (nbits - k))) & ((1 << nbits) - 1)
    return (value & ~field_mask) | (rotated << offset)
# Whole-register convenience wrappers around rotate_left().
def rotate_dword(x, count): return rotate_left(x, count, 32, 0)
def rotate_word(x, count): return rotate_left(x, count, 16, 0)
def rotate_byte(x, count): return rotate_left(x, count, 8, 0)
# add_idc_hotkey return codes
IDCHK_OK = 0 # ok
IDCHK_ARG = -1 # bad argument(s)
IDCHK_KEY = -2 # bad hotkey name
IDCHK_MAX = -3 # too many IDC hotkeys
add_idc_hotkey = ida_kernwin.add_idc_hotkey
del_idc_hotkey = ida_kernwin.del_idc_hotkey
jumpto = ida_kernwin.jumpto
auto_wait = ida_auto.auto_wait
def eval_idc(expr):
    """
    Evaluate an IDC expression

    @param expr: an expression
    @return: the expression value. If there are problems, the returned value will be "IDC_FAILURE: xxx"
             where xxx is the error description
    @note: Python implementation evaluates IDC only, while IDC can call other registered languages
    """
    result = ida_expr.idc_value_t()
    err = ida_expr.eval_idc_expr(result, BADADDR, expr)
    if err:
        return "IDC_FAILURE: "+err
    # Only numeric and string IDC results can be mapped back to Python.
    if result.vtype == '\x02': # VT_LONG
        return result.num
    if result.vtype == '\x07': # VT_STR
        return result.c_str()
    raise NotImplementedError("eval_idc() supports only expressions returning strings or longs")
def EVAL_FAILURE(code):
    """
    Check the result of eval_idc() for evaluation failures

    @param code: result of eval_idc()
    @return: True if there was an evaluation error
    """
    # eval_idc() reports failures as 'str' objects ("IDC_FAILURE: ...").
    # The previous 'type(code) == bytes' check could never match them, and
    # for genuine bytes input bytes.startswith(str) raises TypeError.
    return isinstance(code, str) and code.startswith("IDC_FAILURE: ")
def save_database(idbname, flags=0):
    """
    Save current database to the specified idb file

    @param idbname: name of the idb file. if empty, the current idb
                    file will be used.
    @param flags: combination of ida_loader.DBFL_... bits or 0
    """
    if len(idbname) == 0:
        idbname = get_idb_path()
    # only these DBFL_ bits are honored here
    allowed = ida_loader.DBFL_KILL | ida_loader.DBFL_COMP | ida_loader.DBFL_BAK
    return ida_loader.save_database(idbname, flags & allowed)
DBFL_BAK = ida_loader.DBFL_BAK # for compatiblity with older versions, eventually delete this
def validate_idb_names(do_repair = 0):
    """
    Check the consistency of the IDB's name records.

    @param do_repair: attempt to repair the netnode header if TRUE
    @return: number of inconsistent name records
    """
    return ida_nalt.validate_idb_names(do_repair)
qexit = ida_pro.qexit
def call_system(command):
    """
    Execute an OS command synchronously.

    @param command: command line to execute
    @return: error code from OS
    @note:
        IDA will wait for the started program to finish.
        In order to start the command in parallel, use OS methods.
        For example, you may start another program in parallel using
        "start" command.
    """
    return os.system(command)
def qsleep(milliseconds):
    """
    Suspend IDA for the specified amount of time.

    @param milliseconds: time to sleep, in milliseconds
    """
    seconds = float(milliseconds) / 1000
    time.sleep(seconds)
load_and_run_plugin = ida_loader.load_and_run_plugin
plan_to_apply_idasgn = ida_funcs.plan_to_apply_idasgn
#----------------------------------------------------------------------------
# C H A N G E P R O G R A M R E P R E S E N T A T I O N
#----------------------------------------------------------------------------
def delete_all_segments():
    """
    Delete all segments, instructions, comments, i.e. everything
    except values of bytes.
    """
    ea = ida_ida.cvar.inf.min_ea
    # Brute-force nuke all info from all the heads
    while ea != BADADDR and ea <= ida_ida.cvar.inf.max_ea:
        # drop local and global names at this head
        ida_name.del_local_name(ea)
        ida_name.del_global_name(ea)
        func = ida_funcs.get_func(ea)
        if func:
            # clear both comment variants (False/True flag), then the function itself
            ida_funcs.set_func_cmt(func, "", False)
            ida_funcs.set_func_cmt(func, "", True)
            ida_funcs.del_func(ea)
        ida_bytes.del_hidden_range(ea)
        seg = ida_segment.getseg(ea)
        if seg:
            # clear segment comments before deleting the segment record
            ida_segment.set_segment_cmt(seg, "", False)
            ida_segment.set_segment_cmt(seg, "", True)
            # SEGMOD_KEEP | SEGMOD_SILENT: remove the segment without a prompt
            ida_segment.del_segm(ea, ida_segment.SEGMOD_KEEP | ida_segment.SEGMOD_SILENT)
        # advance to the next defined head (byte values themselves are kept)
        ea = ida_bytes.next_head(ea, ida_ida.cvar.inf.max_ea)
create_insn = ida_ua.create_insn
def plan_and_wait(sEA, eEA, final_pass=True):
    """
    Perform a full analysis of the given address range and wait for it.

    @param sEA: starting linear address
    @param eEA: ending linear address (excluded)
    @param final_pass: make the final pass over the specified range
    @return: 1-ok, 0-Ctrl-Break was pressed.
    """
    return ida_auto.plan_and_wait(sEA, eEA, final_pass)
def set_name(ea, name, flags=ida_name.SN_CHECK):
    """
    Rename an address.

    @param ea: linear address
    @param name: new name of address. If name == "", then delete old name
    @param flags: combination of SN_... constants
    @return: 1-ok, 0-failure
    """
    return ida_name.set_name(ea, name, flags)
SN_CHECK = ida_name.SN_CHECK
SN_NOCHECK = ida_name.SN_NOCHECK # Don't fail if the name contains invalid characters.
# If this bit is clear, all invalid chars
# (those !is_ident_cp()) will be replaced
# by SUBSTCHAR (usually '_').
# List of valid characters is defined in ida.cfg
SN_PUBLIC = ida_name.SN_PUBLIC # if set, make name public
SN_NON_PUBLIC = ida_name.SN_NON_PUBLIC # if set, make name non-public
SN_WEAK = ida_name.SN_WEAK # if set, make name weak
SN_NON_WEAK = ida_name.SN_NON_WEAK # if set, make name non-weak
SN_AUTO = ida_name.SN_AUTO # if set, make name autogenerated
SN_NON_AUTO = ida_name.SN_NON_AUTO # if set, make name non-autogenerated
SN_NOLIST = ida_name.SN_NOLIST # if set, exclude name from the list
# if not set, then include the name into
# the list (however, if other bits are set,
# the name might be immediately excluded
# from the list)
SN_NOWARN = ida_name.SN_NOWARN # don't display a warning if failed
SN_LOCAL = ida_name.SN_LOCAL # create local name. a function should exist.
# local names can't be public or weak.
# also they are not included into the list
# of names they can't have dummy prefixes
set_cmt = ida_bytes.set_cmt
def make_array(ea, nitems):
    """
    Create an array.

    @param ea: linear address
    @param nitems: size of array in items
    @return: result of ida_bytes.create_data(), or False if the item at
             'ea' cannot be turned into an array
    @note: This function will create an array of the items with the same type as
           the type of the item at 'ea'. If the byte at 'ea' is undefined, then
           this function will create an array of bytes.
    """
    flags = ida_bytes.get_flags(ea)
    # code, tail and alignment bytes cannot become arrays
    if ida_bytes.is_code(flags) or ida_bytes.is_tail(flags) or ida_bytes.is_align(flags):
        return False
    # undefined bytes become a byte array
    if ida_bytes.is_unknown(flags):
        flags = ida_bytes.FF_BYTE
    if ida_bytes.is_struct(flags):
        # struct items: take element size and type id from the opinfo
        ti = ida_nalt.opinfo_t()
        assert ida_bytes.get_opinfo(ti, ea, 0, flags), "get_opinfo() failed"
        itemsize = ida_bytes.get_data_elsize(ea, flags, ti)
        tid = ti.tid
    else:
        # plain data: element size is the current item size, no type id
        itemsize = ida_bytes.get_item_size(ea)
        tid = BADADDR
    return ida_bytes.create_data(ea, flags, itemsize*nitems, tid)
def create_strlit(ea, endea):
    """
    Create a string literal.
    The string type is taken from the database (get_inf_attr(INF_STRTYPE)).

    @param ea: linear address
    @param endea: ending address of the string (excluded)
                  if endea == BADADDR, then length of string will be calculated
                  by the kernel
    @return: 1-ok, 0-failure
    @note: The type of an existing string is returned by get_str_type()
    """
    length = 0 if endea == BADADDR else endea - ea
    return ida_bytes.create_strlit(ea, length, get_inf_attr(INF_STRTYPE))
create_data = ida_bytes.create_data
# Thin wrappers around ida_bytes.create_* with the item size fixed per type.
def create_byte(ea):
    """
    Convert the current item to a byte

    @param ea: linear address
    @return: 1-ok, 0-failure
    """
    return ida_bytes.create_byte(ea, 1)
def create_word(ea):
    """
    Convert the current item to a word (2 bytes)

    @param ea: linear address
    @return: 1-ok, 0-failure
    """
    return ida_bytes.create_word(ea, 2)
def create_dword(ea):
    """
    Convert the current item to a double word (4 bytes)

    @param ea: linear address
    @return: 1-ok, 0-failure
    """
    return ida_bytes.create_dword(ea, 4)
def create_qword(ea):
    """
    Convert the current item to a quadro word (8 bytes)

    @param ea: linear address
    @return: 1-ok, 0-failure
    """
    return ida_bytes.create_qword(ea, 8)
def create_oword(ea):
    """
    Convert the current item to an octa word (16 bytes/128 bits)

    @param ea: linear address
    @return: 1-ok, 0-failure
    """
    return ida_bytes.create_oword(ea, 16)
def create_yword(ea):
    """
    Convert the current item to a ymm word (32 bytes/256 bits)

    @param ea: linear address
    @return: 1-ok, 0-failure
    """
    return ida_bytes.create_yword(ea, 32)
def create_float(ea):
    """
    Convert the current item to a floating point (4 bytes)

    @param ea: linear address
    @return: 1-ok, 0-failure
    """
    return ida_bytes.create_float(ea, 4)
def create_double(ea):
    """
    Convert the current item to a double floating point (8 bytes)

    @param ea: linear address
    @return: 1-ok, 0-failure
    """
    return ida_bytes.create_double(ea, 8)
def create_pack_real(ea):
    """
    Convert the current item to a packed real (10 or 12 bytes)

    @param ea: linear address
    @return: 1-ok, 0-failure
    """
    return ida_bytes.create_packed_real(ea, ida_idp.ph_get_tbyte_size())
def create_tbyte(ea):
    """
    Convert the current item to a tbyte (10 or 12 bytes)

    @param ea: linear address
    @return: 1-ok, 0-failure
    """
    return ida_bytes.create_tbyte(ea, ida_idp.ph_get_tbyte_size())
def create_struct(ea, size, strname):
    """
    Convert the current item to a structure instance.

    @param ea: linear address
    @param size: structure size in bytes. -1 means that the size
                 will be calculated automatically
    @param strname: name of a structure type
    @return: 1-ok, 0-failure
    """
    strid = ida_struct.get_struc_id(strname)
    nbytes = ida_struct.get_struc_size(strid) if size == -1 else size
    return ida_bytes.create_struct(ea, nbytes, strid)
create_custom_data = ida_bytes.create_custdata
create_align = ida_bytes.create_align
def define_local_var(start, end, location, name):
    """
    Create a local variable

    @param start: start of address range for the local variable
    @param end: end of address range for the local variable
    @param location: the variable location in the "[bp+xx]" form where xx is
                     a number. The location can also be specified as a
                     register name.
    @param name: name of the local variable
    @return: 1-ok, 0-failure
    @note: For the stack variables the end address is ignored.
           If there is no function at 'start' then this function.
           will fail.
    """
    func = ida_funcs.get_func(start)
    if not func:
        return 0
    # Find out if location is in the [bp+xx] form.
    # Raw string so '\[' stays a regex escape (the previous non-raw literal
    # was an invalid Python string escape, warned about on modern Python).
    r = re.compile(r"\[([a-z]+)([-+][0-9a-fx]+)", re.IGNORECASE)
    m = r.match(location)
    if m:
        # Location in the form of [bp+xx]
        register = ida_idp.str2reg(m.group(1))
        if register == -1:
            return 0
        offset = int(m.group(2), 0)
        return 1 if ida_frame.define_stkvar(func, name, offset, ida_bytes.byte_flag(), None, 1) else 0
    else:
        # Location as simple register name
        return ida_frame.add_regvar(func, start, end, location, name, None)
del_items = ida_bytes.del_items
DELIT_SIMPLE = ida_bytes.DELIT_SIMPLE # simply undefine the specified item
DELIT_EXPAND = ida_bytes.DELIT_EXPAND # propogate undefined items, for example
# if removing an instruction removes all
# references to the next instruction, then
# plan to convert to unexplored the next
# instruction too.
DELIT_DELNAMES = ida_bytes.DELIT_DELNAMES # delete any names at the specified address(es)
def set_array_params(ea, flags, litems, align):
    """
    Set the array representation format (delegated to the IDC interpreter).

    @param ea: linear address
    @param flags: combination of AP_... constants or 0
    @param litems: number of items per line. 0 means auto
    @param align: element alignment
                  - -1: do not align
                  - 0:  automatic alignment
                  - other values: element width
    @return: 1-ok, 0-failure
    """
    idc_call = "set_array_params(0x%X, 0x%X, %d, %d)" % (ea, flags, litems, align)
    return eval_idc(idc_call)
AP_ALLOWDUPS = 0x00000001 # use 'dup' construct
AP_SIGNED = 0x00000002 # treats numbers as signed
AP_INDEX = 0x00000004 # display array element indexes as comments
AP_ARRAY = 0x00000008 # reserved (this flag is not stored in database)
AP_IDXBASEMASK = 0x000000F0 # mask for number base of the indexes
AP_IDXDEC = 0x00000000 # display indexes in decimal
AP_IDXHEX = 0x00000010 # display indexes in hex
AP_IDXOCT = 0x00000020 # display indexes in octal
AP_IDXBIN = 0x00000030 # display indexes in binary
op_bin = ida_bytes.op_bin
op_oct = ida_bytes.op_oct
op_dec = ida_bytes.op_dec
op_hex = ida_bytes.op_hex
op_chr = ida_bytes.op_chr
def op_plain_offset(ea, n, base):
    """
    Convert operand to an offset
    (for the explanations of 'ea' and 'n' please see op_bin())

    Example:
    ========
        seg000:2000 dw 1234h
    and there is a segment at paragraph 0x1000 and there is a data item
    within the segment at 0x1234:
        seg000:1234 MyString db 'Hello, world!',0
    Then you need to specify a linear address of the segment base to
    create a proper offset:
        op_plain_offset(["seg000",0x2000],0,0x10000);
    and you will have:
        seg000:2000 dw offset MyString
    Motorola 680x0 processors have a concept of "outer offsets": to create
    one, combine the operand number with the OPND_OUTER bit. Outer offsets
    are meaningful only for Motorola 680x0.

    @param ea: linear address
    @param n: number of operand
        - 0 - the first operand
        - 1 - the second, third and all other operands
        - -1 - all operands
    @param base: base of the offset as a linear address
        If base == BADADDR then the current operand becomes non-offset
    """
    # BADADDR means "remove the offset representation"
    if base == BADADDR:
        return ida_bytes.clr_op_type(ea, n)
    return ida_offset.op_plain_offset(ea, n, base)
OPND_OUTER = ida_bytes.OPND_OUTER # outer offset base
op_offset = ida_offset.op_offset
REF_OFF8 = ida_nalt.REF_OFF8 # 8bit full offset
REF_OFF16 = ida_nalt.REF_OFF16 # 16bit full offset
REF_OFF32 = ida_nalt.REF_OFF32 # 32bit full offset
REF_LOW8 = ida_nalt.REF_LOW8 # low 8bits of 16bit offset
REF_LOW16 = ida_nalt.REF_LOW16 # low 16bits of 32bit offset
REF_HIGH8 = ida_nalt.REF_HIGH8 # high 8bits of 16bit offset
REF_HIGH16 = ida_nalt.REF_HIGH16 # high 16bits of 32bit offset
REF_OFF64 = ida_nalt.REF_OFF64 # 64bit full offset
REFINFO_RVA = 0x10 # based reference (rva)
REFINFO_PASTEND = 0x20 # reference past an item it may point to an nonexistitng
# do not destroy alignment dirs
REFINFO_NOBASE = 0x80 # offset base is a number
# that base have be any value
# nb: base xrefs are created only if base
# points to the middle of a segment
REFINFO_SUBTRACT = 0x0100 # the reference value is subtracted from
# the base value instead of (as usual)
# being added to it
REFINFO_SIGNEDOP = 0x0200 # the operand value is sign-extended (only
# supported for REF_OFF8/16/32/64)
op_seg = ida_bytes.op_seg
op_num = ida_bytes.op_num
op_flt = ida_bytes.op_flt
op_man = ida_bytes.set_forced_operand
toggle_sign = ida_bytes.toggle_sign
def toggle_bnot(ea, n):
    """
    Toggle the bitwise-not operator for an operand.

    @param n: number of operand
        - 0 - the first operand
        - 1 - the second, third and all other operands
        - -1 - all operands
    @param ea: linear address
    @return: always True
    """
    ida_bytes.toggle_bnot(ea, n)
    return True
op_enum = ida_bytes.op_enum
def op_stroff(ea, n, strid, delta):
    """
    Convert operand to an offset in a structure.

    @param ea: linear address, or an already-decoded ida_ua.insn_t
    @param n: number of operand
        - 0 - the first operand
        - 1 - the second, third and all other operands
        - -1 - all operands
    @param strid: id of a structure type
    @param delta: struct offset delta. usually 0. denotes the difference
                  between the structure base and the pointer into the structure.
    """
    # single-element tid path holding the structure id
    path = ida_pro.tid_array(1)
    path[0] = strid
    if isinstance(ea, ida_ua.insn_t):
        insn = ea
    else:
        # caller gave an address: decode the instruction there first
        insn = ida_ua.insn_t()
        ida_ua.decode_insn(insn, ea)
    return ida_bytes.op_stroff(insn, n, path.cast(), 1, delta)
op_stkvar = ida_bytes.op_stkvar
def op_offset_high16(ea, n, target):
    """
    Convert operand to a high offset (the upper 16 bits of an offset).
    This type is used by TMS320C6 processors (and probably by other
    RISC processors too).

    @param ea: linear address
    @param n: number of operand
        - 0 - the first operand
        - 1 - the second, third and all other operands
        - -1 - all operands
    @param target: the full value (all 32bits) of the offset
    """
    return ida_offset.op_offset(ea, n, ida_nalt.REF_HIGH16, target)
def MakeVar(ea):
    # Obsolete IDC stub kept for backward compatibility; intentionally a no-op.
    pass
# Every anterior/posterior line has its number.
# Anterior lines have numbers from E_PREV
# Posterior lines have numbers from E_NEXT
E_PREV = ida_lines.E_PREV
E_NEXT = ida_lines.E_NEXT
get_extra_cmt = ida_lines.get_extra_cmt
update_extra_cmt = ida_lines.update_extra_cmt
del_extra_cmt = ida_lines.del_extra_cmt
set_manual_insn = ida_bytes.set_manual_insn
get_manual_insn = ida_bytes.get_manual_insn
patch_dbg_byte = ida_dbg.put_dbg_byte
patch_byte = ida_bytes.patch_byte
patch_word = ida_bytes.patch_word
patch_dword = ida_bytes.patch_dword
patch_qword = ida_bytes.patch_qword
SR_inherit = 1 # value is inherited from the previous range
SR_user = 2 # value is specified by the user
SR_auto = 3 # value is determined by IDA
SR_autostart = 4 # as SR_auto for segment starting address
def split_sreg_range(ea, reg, value, tag=SR_user):
    """
    Set value of a segment register.

    @param ea: linear address
    @param reg: name of a register, like "cs", "ds", "es", etc.
    @param value: new value of the segment register.
    @param tag: of SR_... constants
    @return: False if the register name is unknown
    @note: IDA keeps tracks of all the points where segment register change their
           values. This function allows you to specify the correct value of a segment
           register if IDA is not able to find the corrent value.
    """
    reg_index = ida_idp.str2reg(reg)
    if reg_index < 0:
        # unknown register name
        return False
    return ida_segregs.split_sreg_range(ea, reg_index, value, tag)
auto_mark_range = ida_auto.auto_mark_range
auto_unmark = ida_auto.auto_unmark
def AutoMark(ea, qtype):
    """
    Plan to analyze a single address (one-byte auto_mark_range).
    """
    start = ea
    end = ea + 1
    return auto_mark_range(start, end, qtype)
AU_UNK = ida_auto.AU_UNK # make unknown
AU_CODE = ida_auto.AU_CODE # convert to instruction
AU_PROC = ida_auto.AU_PROC # make function
AU_USED = ida_auto.AU_USED # reanalyze
AU_LIBF = ida_auto.AU_LIBF # apply a flirt signature (the current signature!)
AU_FINAL = ida_auto.AU_FINAL # coagulate unexplored items
#----------------------------------------------------------------------------
# P R O D U C E O U T P U T F I L E S
#----------------------------------------------------------------------------
def gen_file(filetype, path, ea1, ea2, flags):
    """
    Generate an output file

    @param filetype: type of output file. One of OFILE_... symbols. See below.
    @param path: the output file path (will be overwritten!)
    @param ea1: start address. For some file types this argument is ignored
    @param ea2: end address. For some file types this argument is ignored
    @param flags: bit combination of GENFLG_...

    @returns: number of the generated lines.
              -1 if an error occurred
              OFILE_EXE: 0-can't generate exe file, 1-ok
    """
    f = ida_diskio.fopenWB(path)
    if not f:
        return -1
    try:
        return ida_loader.gen_file(filetype, f, ea1, ea2, flags)
    finally:
        # close the FILE* even if gen_file() raises (the old code leaked it)
        ida_diskio.eclose(f)
# output file types:
OFILE_MAP = ida_loader.OFILE_MAP
OFILE_EXE = ida_loader.OFILE_EXE
OFILE_IDC = ida_loader.OFILE_IDC
OFILE_LST = ida_loader.OFILE_LST
OFILE_ASM = ida_loader.OFILE_ASM
OFILE_DIF = ida_loader.OFILE_DIF
# output control flags:
GENFLG_MAPSEG = ida_loader.GENFLG_MAPSEG # map: generate map of segments
GENFLG_MAPNAME = ida_loader.GENFLG_MAPNAME # map: include dummy names
GENFLG_MAPDMNG = ida_loader.GENFLG_MAPDMNG # map: demangle names
GENFLG_MAPLOC = ida_loader.GENFLG_MAPLOC # map: include local names
GENFLG_IDCTYPE = ida_loader.GENFLG_IDCTYPE # idc: gen only information about types
GENFLG_ASMTYPE = ida_loader.GENFLG_ASMTYPE # asm&lst: gen information about types too
GENFLG_GENHTML = ida_loader.GENFLG_GENHTML # asm&lst: generate html (gui version only)
GENFLG_ASMINC = ida_loader.GENFLG_ASMINC # asm&lst: gen information only about types
def gen_flow_graph(outfile, title, ea1, ea2, flags):
    """
    Generate a flow chart GDL file.

    @param outfile: output file name. GDL extension will be used
    @param title: graph title
    @param ea1: beginning of the range to flow chart
    @param ea2: end of the range to flow chart.
    @param flags: combination of CHART_... constants
    @note: If ea2 == BADADDR then ea1 is treated as an address within a function.
           That function will be flow charted.
    """
    return ida_gdl.gen_flow_graph(outfile, title, None, ea1, ea2, flags)
CHART_PRINT_NAMES = 0x1000 # print labels for each block?
CHART_GEN_GDL = 0x4000 # generate .gdl file (file extension is forced to .gdl)
CHART_WINGRAPH = 0x8000 # call wingraph32 to display the graph
CHART_NOLIBFUNCS = 0x0400 # don't include library functions in the graph
def gen_simple_call_chart(outfile, title, flags):
    """
    Generate a function call graph GDL file.

    @param outfile: output file name. GDL extension will be used
    @param title: graph title
    @param flags: combination of CHART_GEN_GDL, CHART_WINGRAPH, CHART_NOLIBFUNCS
    """
    return ida_gdl.gen_simple_call_chart(outfile, "Generating chart", title, flags)
#----------------------------------------------------------------------------
# C O M M O N I N F O R M A T I O N
#----------------------------------------------------------------------------
def idadir():
    """
    Return the directory where IDA.EXE resides.
    """
    return ida_diskio.idadir("")
get_root_filename = ida_nalt.get_root_filename
get_input_file_path = ida_nalt.get_input_file_path
set_root_filename = ida_nalt.set_root_filename
def get_idb_path():
    """
    Return the full path of the currently open IDB database.
    """
    return ida_loader.get_path(ida_loader.PATH_TYPE_IDB)
# Misc re-exports: input-file MD5 and low-level byte/flag accessors.
retrieve_input_file_md5 = ida_nalt.retrieve_input_file_md5
get_full_flags = ida_bytes.get_full_flags
get_db_byte = ida_bytes.get_db_byte
def get_bytes(ea, size, use_dbg = False):
    """
    Read a number of bytes from the program.

    @param ea: linear address
    @param size: number of 8-bit bytes to read
    @param use_dbg: when True, read through the debugger; otherwise read
                    from the database
    @return: the bytes read, or None on failure
    """
    # Pick the backing reader once, then issue a single call.
    reader = ida_idd.dbg_read_memory if use_dbg else ida_bytes.get_bytes
    return reader(ea, size)
get_wide_byte = ida_bytes.get_wide_byte  # re-export: read one (possibly wide) byte
def __DbgValue(ea, len):
    # Read `len` bytes of debugger memory and unpack them as an integer,
    # honoring the database endianness. Returns None for unsupported widths
    # or on a failed read.
    table = ida_idaapi.__struct_unpack_table
    if len not in table:
        return None
    raw = ida_idd.dbg_read_memory(ea, len)
    if raw is None:
        return None
    endian = ">" if ida_ida.cvar.inf.is_be() else "<"
    return struct.unpack(endian + table[len][1], raw)[0]
def read_dbg_byte(ea):
    """
    Read one program byte through the debugger.

    @param ea: linear address
    @return: the value, or None on failure
    """
    return __DbgValue(ea, 1)
def read_dbg_word(ea):
    """
    Read one program word (2 bytes) through the debugger.

    @param ea: linear address
    @return: the value, or None on failure
    """
    return __DbgValue(ea, 2)
def read_dbg_dword(ea):
    """
    Read one program double-word (4 bytes) through the debugger.

    @param ea: linear address
    @return: the value, or None on failure
    """
    return __DbgValue(ea, 4)
def read_dbg_qword(ea):
    """
    Read one program quad-word (8 bytes) through the debugger.

    @param ea: linear address
    @return: the value, or None on failure
    """
    return __DbgValue(ea, 8)
read_dbg_memory = ida_idd.dbg_read_memory  # re-export: raw debugger memory read
def write_dbg_memory(ea, data):
    """
    Write to debugger memory.

    @param ea: linear address
    @param data: string to write
    @return: number of written bytes (-1 - network/debugger error)
    Thread-safe function (may be called only from the main thread and debthread)
    """
    __warn_once_deprecated_proto_confusion("write_dbg_memory", "ida_dbg.write_dbg_memory")
    if not ida_dbg.dbg_can_query():
        return -1
    if len(data) > 0:
        return ida_idd.dbg_write_memory(ea, data)
    # Bug fix: the original fell off the end (returning None) for empty data,
    # violating the documented "number of written bytes" contract. An empty
    # write trivially succeeds with 0 bytes written.
    return 0
# Data-reading re-exports from ida_bytes (original byte, word, dword, qword).
get_original_byte = ida_bytes.get_original_byte
get_wide_word = ida_bytes.get_wide_word
get_wide_dword = ida_bytes.get_wide_dword
get_qword = ida_bytes.get_qword
def GetFloat(ea):
    """
    Read a 4-byte IEEE floating point number stored with the same
    endianness as integers.

    @param ea: linear address
    @return: float
    """
    # Round-trip the 32-bit pattern through struct to reinterpret it as a float.
    raw = struct.pack("I", get_wide_dword(ea))
    return struct.unpack("f", raw)[0]
def GetDouble(ea):
    """
    Read an 8-byte IEEE floating point number stored with the same
    endianness as integers.

    @param ea: linear address
    @return: double
    """
    # Round-trip the 64-bit pattern through struct to reinterpret it as a double.
    raw = struct.pack("Q", get_qword(ea))
    return struct.unpack("d", raw)[0]
def get_name_ea_simple(name):
    """
    Resolve a name to its linear address.

    @param name: name of program byte
    @return: address of the name, or BADADDR when no such name exists
    """
    # BADADDR as the "from" address requests a global (context-free) lookup.
    return ida_name.get_name_ea(BADADDR, name)
get_name_ea = ida_name.get_name_ea  # re-export: context-sensitive name lookup
def get_segm_by_sel(base):
    """
    Find a segment by its base paragraph or selector.

    @param base: segment base paragraph or selector
    @return: linear start address of the segment, or BADADDR when no
             such segment exists
    """
    selector = ida_segment.find_selector(base)
    seg = ida_segment.get_segm_by_sel(selector)
    return seg.start_ea if seg else BADADDR
get_screen_ea = ida_kernwin.get_screen_ea  # re-export: address under the cursor
def get_curline():
    """
    Return the disassembly line at the cursor, with color tags stripped.

    @return: string
    """
    raw = ida_kernwin.get_curline()
    return ida_lines.tag_remove(raw)
def read_selection_start():
    """
    Return the start address of the selected range.

    @return: BADADDR when the user has not selected a range
    """
    ok, start, _end = ida_kernwin.read_range_selection(None)
    return start if ok == 1 else BADADDR
def read_selection_end():
    """
    Return the end address of the selected range.

    @return: BADADDR when the user has not selected a range
    """
    ok, _start, end = ida_kernwin.read_range_selection(None)
    return end if ok == 1 else BADADDR
def get_sreg(ea, reg):
    """
    Return the value of a segment register at the given address.

    @param ea: linear address
    @param reg: name of segment register
    @return: the segment register value, or -1 on error
    @note: in a 32bit program segment registers usually hold selectors;
           use sel2para() to obtain the paragraph they point to.
    """
    reg_num = ida_idp.str2reg(reg)
    if reg_num < 0:
        # Unknown register name.
        return -1
    return ida_segregs.get_sreg(ea, reg_num)
# Address stepping re-exports (next/previous mapped address).
next_addr = ida_bytes.next_addr
prev_addr = ida_bytes.prev_addr
def next_head(ea, maxea=BADADDR):
    """
    Find the next defined item (instruction or data) in the program.

    @param ea: linear address to start the search from
    @param maxea: the search stops at this address (maxea itself is
                  excluded from the search range)
    @return: BADADDR when there are no more defined items
    """
    return ida_bytes.next_head(ea, maxea)
def prev_head(ea, minea=0):
    """
    Find the previous defined item (instruction or data) in the program.

    @param ea: linear address to start the search from
    @param minea: the search stops at this address (minea itself is
                  included in the search range)
    @return: BADADDR when there are no more defined items
    """
    return ida_bytes.prev_head(ea, minea)
# Item-boundary re-exports from ida_bytes.
next_not_tail = ida_bytes.next_not_tail
prev_not_tail = ida_bytes.prev_not_tail
get_item_head = ida_bytes.get_item_head
get_item_end = ida_bytes.get_item_end
def get_item_size(ea):
    """
    Return the size of the instruction or data item at `ea`, in bytes.

    @param ea: linear address
    @return: 1..n
    """
    end = ida_bytes.get_item_end(ea)
    return end - ea
def func_contains(func_ea, ea):
    """
    Check whether the function containing `func_ea` also contains `ea`.

    @param func_ea: any address belonging to the function
    @param ea: linear address
    @return: success
    """
    pfn = ida_funcs.get_func(func_ea)
    # No function at func_ea -> short-circuits to False.
    return bool(pfn) and ida_funcs.func_contains(pfn, ea)
# Flag bits for get_name() (re-exported from ida_name).
GN_VISIBLE = ida_name.GN_VISIBLE # replace forbidden characters by SUBSTCHAR
GN_COLORED = ida_name.GN_COLORED # return colored name
GN_DEMANGLED = ida_name.GN_DEMANGLED # return demangled name
GN_STRICT = ida_name.GN_STRICT # fail if cannot demangle
GN_SHORT = ida_name.GN_SHORT # use short form of demangled name
GN_LONG = ida_name.GN_LONG # use long form of demangled name
GN_LOCAL = ida_name.GN_LOCAL # try to get local name first; if failed, get global
GN_ISRET = ida_name.GN_ISRET # for dummy names: use retloc
GN_NOT_ISRET = ida_name.GN_NOT_ISRET # for dummy names: do not use retloc
calc_gtn_flags = ida_name.calc_gtn_flags
def get_name(ea, gtn_flags=0):
    """
    Return the name at the given address.

    @param ea: linear address
    @param gtn_flags: combination of GN_ bits controlling how the name
                      is retrieved
    @return: "" when the byte has no name
    """
    return ida_name.get_ea_name(ea, gtn_flags)
def demangle_name(name, disable_mask):
    """
    Demangle a name.

    @param name: name to demangle
    @param disable_mask: mask controlling how the name is demangled;
                         obtain it with get_inf_attr(INF_SHORT_DN) or
                         get_inf_attr(INF_LONG_DN)
    @return: the demangled name, or None when the input cannot be demangled
    """
    # Always request the full demangled form here.
    return ida_name.demangle_name(name, disable_mask, ida_name.DQT_FULL)
def generate_disasm_line(ea, flags):
    """
    Produce a disassembly line with color tags stripped.

    @param ea: linear address of instruction
    @param flags: combination of the GENDSM_ flags, or 0
    @return: "" when no instruction could be decoded at the location
    @note: the mnemonics may not match exactly what is shown on screen
    """
    text = ida_lines.generate_disasm_line(ea, flags)
    return ida_lines.tag_remove(text) if text else ""
# flags for generate_disasm_line
# generate a disassembly line as if
# there is an instruction at 'ea'
GENDSM_FORCE_CODE = ida_lines.GENDSM_FORCE_CODE
# if the instruction consists of several lines,
# produce all of them (useful for parallel instructions)
GENDSM_MULTI_LINE = ida_lines.GENDSM_MULTI_LINE
def GetDisasm(ea):
    """
    Return the plain disassembly line at `ea`.

    @param ea: linear address of instruction
    @return: "" when no instruction could be decoded at the location
    @note: the mnemonics may not match exactly what is shown on screen
    """
    # Delegate with no GENDSM_ flags set.
    return generate_disasm_line(ea, 0)
def print_insn_mnem(ea):
    """
    Return the instruction mnemonic at `ea`.

    @param ea: linear address of instruction
    @return: "" when no instruction exists at the location
    @note: the mnemonics may not match exactly what is shown on screen
    """
    # ua_mnem returns a falsy value on failure; normalize it to "".
    return ida_ua.ua_mnem(ea) or ""
def print_operand(ea, n):
    """
    Return the textual form of an operand of an instruction or data item.

    @param ea: linear address of the item
    @param n: operand number (0 = first operand, 1 = second operand)
    @return: the current text representation of the operand, or ""
    """
    text = ida_ua.print_operand(ea, n)
    return ida_lines.tag_remove(text) if text else ""
def get_operand_type(ea, n):
    """
    Return the type of an instruction operand.

    @param ea: linear address of instruction
    @param n: operand number (0 = first operand, 1 = second operand)
    @return: one of the o_* constants, or -1 on error
    """
    insn = ida_ua.insn_t()
    if ida_ua.decode_insn(insn, ea) == 0:
        # Not a decodable instruction.
        return -1
    return insn.ops[n].type
# Operand type constants (op_t.type values) as returned by get_operand_type().
o_void = ida_ua.o_void # No Operand ----------
o_reg = ida_ua.o_reg # General Register (al,ax,es,ds...) reg
o_mem = ida_ua.o_mem # Direct Memory Reference (DATA) addr
o_phrase = ida_ua.o_phrase # Memory Ref [Base Reg + Index Reg] phrase
o_displ = ida_ua.o_displ # Memory Reg [Base Reg + Index Reg + Displacement] phrase+addr
o_imm = ida_ua.o_imm # Immediate Value value
o_far = ida_ua.o_far # Immediate Far Address (CODE) addr
o_near = ida_ua.o_near # Immediate Near Address (CODE) addr
o_idpspec0 = ida_ua.o_idpspec0 # Processor specific type
o_idpspec1 = ida_ua.o_idpspec1 # Processor specific type
o_idpspec2 = ida_ua.o_idpspec2 # Processor specific type
o_idpspec3 = ida_ua.o_idpspec3 # Processor specific type
o_idpspec4 = ida_ua.o_idpspec4 # Processor specific type
o_idpspec5 = ida_ua.o_idpspec5 # Processor specific type
# There can be more processor specific types
# x86 aliases for the processor-specific slots:
o_trreg = ida_ua.o_idpspec0 # trace register
o_dbreg = ida_ua.o_idpspec1 # debug register
o_crreg = ida_ua.o_idpspec2 # control register
o_fpreg = ida_ua.o_idpspec3 # floating point register
o_mmxreg = ida_ua.o_idpspec4 # mmx register
o_xmmreg = ida_ua.o_idpspec5 # xmm register
# arm aliases for the processor-specific slots:
o_reglist = ida_ua.o_idpspec1 # Register list (for LDM/STM)
o_creglist = ida_ua.o_idpspec2 # Coprocessor register list (for CDP)
o_creg = ida_ua.o_idpspec3 # Coprocessor register (for LDC/STC)
o_fpreglist = ida_ua.o_idpspec4 # Floating point register list
o_text = ida_ua.o_idpspec5 # Arbitrary text stored in the operand
o_cond = (ida_ua.o_idpspec5+1) # ARM condition as an operand
# ppc aliases for the processor-specific slots:
o_spr = ida_ua.o_idpspec0 # Special purpose register
o_twofpr = ida_ua.o_idpspec1 # Two FPRs
o_shmbme = ida_ua.o_idpspec2 # SH & MB & ME
o_crf = ida_ua.o_idpspec3 # crfield x.reg
o_crb = ida_ua.o_idpspec4 # crbit x.reg
o_dcr = ida_ua.o_idpspec5 # Device control register
def get_operand_value(ea, n):
    """
    Return the immediate number used in an operand.

    @param ea: linear address of instruction
    @param n: the operand number
    @return: value
             operand is an immediate value  => immediate value
             operand has a displacement     => displacement
             operand is a direct memory ref => memory address
             operand is a register          => register number
             operand is a register phrase   => phrase number
             otherwise                      => -1
    """
    insn = ida_ua.insn_t()
    if ida_ua.decode_insn(insn, ea) == 0:
        return -1
    op = insn.ops[n]
    if not op:
        return -1
    # Address-carrying operand kinds all report op.addr.
    if op.type in (ida_ua.o_mem, ida_ua.o_far, ida_ua.o_near, ida_ua.o_displ):
        return op.addr
    if op.type == ida_ua.o_reg:
        return op.reg
    if op.type == ida_ua.o_imm:
        return op.value
    if op.type == ida_ua.o_phrase:
        return op.phrase
    return -1
# Comment accessors, re-exported from ida_bytes.
GetCommentEx = ida_bytes.get_cmt
get_cmt = GetCommentEx
get_forced_operand = ida_bytes.get_forced_operand
# String-type building blocks (ida_nalt): bytes-per-unit, width, and layout.
BPU_1B = ida_nalt.BPU_1B
BPU_2B = ida_nalt.BPU_2B
BPU_4B = ida_nalt.BPU_4B
STRWIDTH_1B = ida_nalt.STRWIDTH_1B
STRWIDTH_2B = ida_nalt.STRWIDTH_2B
STRWIDTH_4B = ida_nalt.STRWIDTH_4B
STRWIDTH_MASK = ida_nalt.STRWIDTH_MASK
STRLYT_TERMCHR = ida_nalt.STRLYT_TERMCHR
STRLYT_PASCAL1 = ida_nalt.STRLYT_PASCAL1
STRLYT_PASCAL2 = ida_nalt.STRLYT_PASCAL2
STRLYT_PASCAL4 = ida_nalt.STRLYT_PASCAL4
STRLYT_MASK = ida_nalt.STRLYT_MASK
STRLYT_SHIFT = ida_nalt.STRLYT_SHIFT
# Composite string type constants (re-exported from ida_nalt).
# Character-terminated string. The termination characters
# are kept in the next bytes of string type.
STRTYPE_TERMCHR = ida_nalt.STRTYPE_TERMCHR
# C-style string.
STRTYPE_C = ida_nalt.STRTYPE_C
# Zero-terminated 16bit chars
STRTYPE_C_16 = ida_nalt.STRTYPE_C_16
# Zero-terminated 32bit chars
STRTYPE_C_32 = ida_nalt.STRTYPE_C_32
# Pascal-style, one-byte length prefix
STRTYPE_PASCAL = ida_nalt.STRTYPE_PASCAL
# Pascal-style, 16bit chars, one-byte length prefix
STRTYPE_PASCAL_16 = ida_nalt.STRTYPE_PASCAL_16
# Pascal-style, two-byte length prefix
STRTYPE_LEN2 = ida_nalt.STRTYPE_LEN2
# Pascal-style, 16bit chars, two-byte length prefix
STRTYPE_LEN2_16 = ida_nalt.STRTYPE_LEN2_16
# Pascal-style, four-byte length prefix
STRTYPE_LEN4 = ida_nalt.STRTYPE_LEN4
# Pascal-style, 16bit chars, four-byte length prefix
STRTYPE_LEN4_16 = ida_nalt.STRTYPE_LEN4_16
# alias kept for backward compatibility
STRTYPE_C16 = STRTYPE_C_16
def get_strlit_contents(ea, length = -1, strtype = STRTYPE_C):
    """
    Return the contents of a string literal.

    @param ea: linear address
    @param strtype: the string type (one of the STRTYPE_... constants)
    @param length: string length; -1 asks IDA to compute the maximum length
    @return: string contents, or an empty string
    """
    n = length
    if n == -1:
        n = ida_bytes.get_max_strlit_length(ea, strtype, ida_bytes.ALOPT_IGNHEADS)
    return ida_bytes.get_strlit_contents(ea, n, strtype)
def get_str_type(ea):
    """
    Return the string type of the item at `ea`.

    @param ea: linear address
    @return: one of the STRTYPE_... constants, or None when the item
             is not a string literal or has no operand info
    """
    flags = ida_bytes.get_flags(ea)
    if not ida_bytes.is_strlit(flags):
        return None
    oi = ida_nalt.opinfo_t()
    if ida_bytes.get_opinfo(oi, ea, 0, flags):
        return oi.strtype
    return None
# The following functions search for the specified byte
# ea - address to start from
# flag is combination of the following bits
# returns BADADDR - not found
find_suspop = ida_search.find_suspop
find_code = ida_search.find_code
find_data = ida_search.find_data
find_unknown = ida_search.find_unknown
find_defined = ida_search.find_defined
find_imm = ida_search.find_imm
SEARCH_UP = ida_search.SEARCH_UP # search backward
SEARCH_DOWN = ida_search.SEARCH_DOWN # search forward
SEARCH_NEXT = ida_search.SEARCH_NEXT # start the search at the next/prev item
# useful only for find_text() and find_binary()
SEARCH_CASE = ida_search.SEARCH_CASE # search case-sensitive
# (only for bin&txt search)
SEARCH_REGEX = ida_search.SEARCH_REGEX # enable regular expressions (only for text)
SEARCH_NOBRK = ida_search.SEARCH_NOBRK # don't test ctrl-break
SEARCH_NOSHOW = ida_search.SEARCH_NOSHOW # don't display the search progress
def find_text(ea, flag, y, x, searchstr):
    # Deprecated wrapper: note the argument order differs from
    # ida_search.find_text, which takes the flags last.
    __warn_once_deprecated_proto_confusion("find_text", "ida_search.find_text")
    return ida_search.find_text(ea, y, x, searchstr, flag)
def find_binary(ea, flag, searchstr, radix=16):
    """
    Deprecated wrapper around ida_search.find_binary (argument order differs).

    @param ea: address to start the search from
    @param flag: combination of SEARCH_... constants
    @param searchstr: binary pattern to look for
    @param radix: numeric base of the pattern digits (default 16)
    @return: address of the match, or BADADDR
    """
    __warn_once_deprecated_proto_confusion("find_binary", "ida_search.find_binary")
    # Replace the fragile `cond and a or b` idiom: it would silently pick
    # min_ea whenever max_ea evaluated falsy. Bit 0 of `flag` selects the
    # forward direction, so the end of the search range is max_ea then.
    if flag & 1:
        endea = ida_ida.cvar.inf.max_ea
    else:
        endea = ida_ida.cvar.inf.min_ea
    return ida_search.find_binary(ea, endea, searchstr, radix, flag)
#----------------------------------------------------------------------------
# G L O B A L S E T T I N G S M A N I P U L A T I O N
#----------------------------------------------------------------------------
def process_config_line(directive):
    """
    Obsolete. Please use ida_idp.process_config_directive().
    """
    # Escape the directive so it survives embedding in the IDC string literal.
    escaped = ida_kernwin.str2user(directive)
    return eval_idc('process_config_directive("%s")' % escaped)
# The following functions allow you to set/get common parameters.
# Please note that not all parameters can be set directly.
# Attribute indices for get_inf_attr()/set_inf_attr() (see _INF_attrs_accessors).
INF_VERSION = 0 # short; Version of database
INF_PROCNAME = 1 # char[8]; Name of current processor
INF_GENFLAGS = 2 # ushort; General flags:
INF_LFLAGS = 3 # uint32; IDP-dependent flags
INF_DATABASE_CHANGE_COUNT= 4 # uint32; database change counter; keeps track of byte and segment modifications
INF_CHANGE_COUNTER=INF_DATABASE_CHANGE_COUNT
INF_FILETYPE = 5 # short; type of input file (see ida.hpp)
FT_EXE_OLD = 0 # MS DOS EXE File (obsolete)
FT_COM_OLD = 1 # MS DOS COM File (obsolete)
FT_BIN = 2 # Binary File
FT_DRV = 3 # MS DOS Driver
FT_WIN = 4 # New Executable (NE)
FT_HEX = 5 # Intel Hex Object File
FT_MEX = 6 # MOS Technology Hex Object File
FT_LX = 7 # Linear Executable (LX)
FT_LE = 8 # Linear Executable (LE)
FT_NLM = 9 # Netware Loadable Module (NLM)
FT_COFF = 10 # Common Object File Format (COFF)
FT_PE = 11 # Portable Executable (PE)
FT_OMF = 12 # Object Module Format
FT_SREC = 13 # R-records
FT_ZIP = 14 # ZIP file (this file is never loaded to IDA database)
FT_OMFLIB = 15 # Library of OMF Modules
FT_AR = 16 # ar library
FT_LOADER = 17 # file is loaded using LOADER DLL
FT_ELF = 18 # Executable and Linkable Format (ELF)
FT_W32RUN = 19 # Watcom DOS32 Extender (W32RUN)
FT_AOUT = 20 # Linux a.out (AOUT)
FT_PRC = 21 # PalmPilot program file
FT_EXE = 22 # MS DOS EXE File
FT_COM = 23 # MS DOS COM File
FT_AIXAR = 24 # AIX ar library
FT_MACHO = 25 # Mac OS X Mach-O file
INF_OSTYPE = 6 # short; FLIRT: OS type the program is for
OSTYPE_MSDOS= 0x0001
OSTYPE_WIN = 0x0002
OSTYPE_OS2 = 0x0004
OSTYPE_NETW = 0x0008
INF_APPTYPE = 7 # short; FLIRT: Application type
APPT_CONSOLE= 0x0001 # console
APPT_GRAPHIC= 0x0002 # graphics
APPT_PROGRAM= 0x0004 # EXE
APPT_LIBRARY= 0x0008 # DLL
APPT_DRIVER = 0x0010 # DRIVER
APPT_1THREAD= 0x0020 # Singlethread
APPT_MTHREAD= 0x0040 # Multithread
APPT_16BIT = 0x0080 # 16 bit application
APPT_32BIT = 0x0100 # 32 bit application
INF_ASMTYPE = 8 # char; target assembler number (0..n)
INF_SPECSEGS = 9
INF_AF = 10 # uint32; Analysis flags:
def _import_module_flag_sets(module, prefixes):
    # Copy every integer attribute of `module` whose name starts with one of
    # `prefixes` into this module's globals (used to re-export flag sets).
    if isinstance(prefixes, str):
        prefixes = [prefixes]
    target = globals()
    for prefix in prefixes:
        for attr in dir(module):
            if not attr.startswith(prefix):
                continue
            value = getattr(module, attr)
            # Only genuine integer flags are re-exported.
            if isinstance(value, ida_idaapi.integer_types):
                target[attr] = value
# Re-export all integer flag sets from ida_ida with the prefixes below.
_import_module_flag_sets(
    ida_ida,
    [
        "INFFL_",
        "LFLG_",
        "IDB_",
        "AF_",
        "AF2_",
        "SW_",
        "NM_",
        "DEMNAM_",
        "LN_",
        "OFLG_",
        "SCF_",
        "LMT_",
        "PREF_",
        "STRF_",
        "ABI_",
    ])
INF_AF2 = 11 # uint32; Analysis flags 2
INF_BASEADDR = 12 # uval_t; base paragraph of the program
INF_START_SS = 13 # int32; value of SS at the start
INF_START_CS = 14 # int32; value of CS at the start
INF_START_IP = 15 # ea_t; IP register value at the start of
                  # program execution
INF_START_EA = 16 # ea_t; Linear address of program entry point
INF_START_SP = 17 # ea_t; SP register value at the start of
                  # program execution
INF_MAIN = 18 # ea_t; address of main()
INF_MIN_EA = 19 # ea_t; The lowest address used
                # in the program
INF_MAX_EA = 20 # ea_t; The highest address used
                # in the program - 1
INF_OMIN_EA = 21
INF_OMAX_EA = 22
INF_LOWOFF = 23 # ea_t; low limit of voids
INF_LOW_OFF=INF_LOWOFF
INF_HIGHOFF = 24 # ea_t; high limit of voids
INF_HIGH_OFF=INF_HIGHOFF
INF_MAXREF = 25 # uval_t; max xref depth
INF_PRIVRANGE_START_EA = 27 # uval_t; Range of addresses reserved for internal use.
INF_START_PRIVRANGE=INF_PRIVRANGE_START_EA
INF_PRIVRANGE_END_EA = 28 # uval_t; Initially (MAXADDR, MAXADDR+0x100000)
INF_END_PRIVRANGE=INF_PRIVRANGE_END_EA
INF_NETDELTA = 29 # sval_t; Delta value to be added to all adresses for mapping to netnodes.
                  # Initially 0.
# CROSS REFERENCES
INF_XREFNUM = 30 # char; Number of references to generate
                 # 0 - xrefs won't be generated at all
INF_TYPE_XREFNUM = 31 # char; Number of references to generate
                      # in the struct & enum windows
                      # 0 - xrefs won't be generated at all
INF_TYPE_XREFS=INF_TYPE_XREFNUM
INF_REFCMTNUM = 32 # uchar; number of comment lines to
                   # generate for refs to ASCII
                   # string or demangled name
                   # 0 - such comments won't be
                   # generated at all
INF_REFCMTS=INF_REFCMTNUM
INF_XREFFLAG = 33 # char; xrefs representation:
INF_XREFS=INF_XREFFLAG
# NAMES
INF_MAX_AUTONAME_LEN = 34 # ushort; max name length (without zero byte)
INF_NAMETYPE = 35 # char; dummy names represenation type
INF_SHORT_DEMNAMES = 36 # int32; short form of demangled names
INF_SHORT_DN=INF_SHORT_DEMNAMES
INF_LONG_DEMNAMES = 37 # int32; long form of demangled names
                       # see demangle.h for definitions
INF_LONG_DN=INF_LONG_DEMNAMES
INF_DEMNAMES = 38 # char; display demangled names as:
INF_LISTNAMES = 39 # uchar; What names should be included in the list?
# DISASSEMBLY LISTING DETAILS
INF_INDENT = 40 # char; Indention for instructions
INF_CMT_INDENT = 41 # char; Indention for comments
INF_COMMENT = 41 # for compatibility
INF_MARGIN = 42 # ushort; max length of data lines
INF_LENXREF = 43 # ushort; max length of line with xrefs
INF_OUTFLAGS = 44 # uint32; output flags
INF_CMTFLG = 45 # char; comments:
INF_CMTFLAG=INF_CMTFLG
INF_LIMITER = 46 # char; Generate borders?
INF_BORDER=INF_LIMITER
INF_BIN_PREFIX_SIZE = 47 # short; # of instruction bytes to show
                         # in line prefix
INF_BINPREF=INF_BIN_PREFIX_SIZE
INF_PREFFLAG = 48 # char; line prefix type:
# STRING LITERALS
INF_STRLIT_FLAGS= 49 # uchar; string literal flags
INF_STRLIT_BREAK= 50 # char; string literal line break symbol
INF_STRLIT_ZEROES= 51 # char; leading zeroes
INF_STRTYPE = 52 # int32; current ascii string type
                 # is considered as several bytes:
                 # low byte:
INF_STRLIT_PREF = 53 # char[16];ASCII names prefix
INF_STRLIT_SERNUM= 54 # uint32; serial number
# DATA ITEMS
INF_DATATYPES = 55 # int32; data types allowed in data carousel
# COMPILER
INF_CC_ID = 57 # uchar; compiler
COMP_MASK = 0x0F # mask to apply to get the pure compiler id
COMP_UNK = 0x00 # Unknown
COMP_MS = 0x01 # Visual C++
COMP_BC = 0x02 # Borland C++
COMP_WATCOM = 0x03 # Watcom C++
COMP_GNU = 0x06 # GNU C++
COMP_VISAGE = 0x07 # Visual Age C++
COMP_BP = 0x08 # Delphi
INF_CC_CM = 58 # uchar; memory model & calling convention
INF_CC_SIZE_I = 59 # uchar; sizeof(int)
INF_CC_SIZE_B = 60 # uchar; sizeof(bool)
INF_CC_SIZE_E = 61 # uchar; sizeof(enum)
INF_CC_DEFALIGN = 62 # uchar; default alignment
INF_CC_SIZE_S = 63
INF_CC_SIZE_L = 64
INF_CC_SIZE_LL = 65
INF_CC_SIZE_LDBL = 66 # uchar; sizeof(long double)
# Backward-compatible aliases for the compiler-related attributes above.
INF_COMPILER = INF_CC_ID
INF_MODEL = INF_CC_CM
INF_SIZEOF_INT = INF_CC_SIZE_I
INF_SIZEOF_BOOL = INF_CC_SIZE_B
INF_SIZEOF_ENUM = INF_CC_SIZE_E
INF_SIZEOF_ALGN = INF_CC_DEFALIGN
INF_SIZEOF_SHORT= INF_CC_SIZE_S
INF_SIZEOF_LONG = INF_CC_SIZE_L
INF_SIZEOF_LLONG= INF_CC_SIZE_LL
INF_SIZEOF_LDBL = INF_CC_SIZE_LDBL
INF_ABIBITS= 67 # uint32; ABI features
INF_APPCALL_OPTIONS= 68 # uint32; appcall options
# Maps each INF_ attribute index to its (getter, setter) pair in ida_ida.
# Used by get_inf_attr()/set_inf_attr() below.
_INF_attrs_accessors = {
    INF_ABIBITS : (ida_ida.inf_get_abibits, ida_ida.inf_set_abibits),
    INF_AF : (ida_ida.inf_get_af, ida_ida.inf_set_af),
    INF_AF2 : (ida_ida.inf_get_af2, ida_ida.inf_set_af2),
    INF_APPCALL_OPTIONS : (ida_ida.inf_get_appcall_options, ida_ida.inf_set_appcall_options),
    INF_APPTYPE : (ida_ida.inf_get_apptype, ida_ida.inf_set_apptype),
    INF_ASMTYPE : (ida_ida.inf_get_asmtype, ida_ida.inf_set_asmtype),
    INF_BASEADDR : (ida_ida.inf_get_baseaddr, ida_ida.inf_set_baseaddr),
    INF_BIN_PREFIX_SIZE : (ida_ida.inf_get_bin_prefix_size, ida_ida.inf_set_bin_prefix_size),
    INF_CC_CM : (ida_ida.inf_get_cc_cm, ida_ida.inf_set_cc_cm),
    INF_CC_DEFALIGN : (ida_ida.inf_get_cc_defalign, ida_ida.inf_set_cc_defalign),
    INF_CC_ID : (ida_ida.inf_get_cc_id, ida_ida.inf_set_cc_id),
    INF_CC_SIZE_B : (ida_ida.inf_get_cc_size_b, ida_ida.inf_set_cc_size_b),
    INF_CC_SIZE_E : (ida_ida.inf_get_cc_size_e, ida_ida.inf_set_cc_size_e),
    INF_CC_SIZE_I : (ida_ida.inf_get_cc_size_i, ida_ida.inf_set_cc_size_i),
    INF_CC_SIZE_L : (ida_ida.inf_get_cc_size_l, ida_ida.inf_set_cc_size_l),
    INF_CC_SIZE_LDBL : (ida_ida.inf_get_cc_size_ldbl, ida_ida.inf_set_cc_size_ldbl),
    INF_CC_SIZE_LL : (ida_ida.inf_get_cc_size_ll, ida_ida.inf_set_cc_size_ll),
    INF_CC_SIZE_S : (ida_ida.inf_get_cc_size_s, ida_ida.inf_set_cc_size_s),
    INF_CMTFLAG : (ida_ida.inf_get_cmtflg, ida_ida.inf_set_cmtflg),
    INF_CMT_INDENT : (ida_ida.inf_get_cmt_indent, ida_ida.inf_set_cmt_indent),
    INF_DATABASE_CHANGE_COUNT : (ida_ida.inf_get_database_change_count, ida_ida.inf_set_database_change_count),
    INF_DATATYPES : (ida_ida.inf_get_datatypes, ida_ida.inf_set_datatypes),
    INF_DEMNAMES : (ida_ida.inf_get_demnames, ida_ida.inf_set_demnames),
    INF_END_PRIVRANGE : (ida_ida.inf_get_privrange_end_ea, ida_ida.inf_set_privrange_end_ea),
    INF_FILETYPE : (ida_ida.inf_get_filetype, ida_ida.inf_set_filetype),
    INF_GENFLAGS : (ida_ida.inf_get_genflags, ida_ida.inf_set_genflags),
    INF_HIGHOFF : (ida_ida.inf_get_highoff, ida_ida.inf_set_highoff),
    INF_INDENT : (ida_ida.inf_get_indent, ida_ida.inf_set_indent),
    INF_LENXREF : (ida_ida.inf_get_lenxref, ida_ida.inf_set_lenxref),
    INF_LFLAGS : (ida_ida.inf_get_lflags, ida_ida.inf_set_lflags),
    INF_LIMITER : (ida_ida.inf_get_limiter, ida_ida.inf_set_limiter),
    INF_LISTNAMES : (ida_ida.inf_get_listnames, ida_ida.inf_set_listnames),
    INF_LONG_DEMNAMES : (ida_ida.inf_get_long_demnames, ida_ida.inf_set_long_demnames),
    INF_LOWOFF : (ida_ida.inf_get_lowoff, ida_ida.inf_set_lowoff),
    INF_MAIN : (ida_ida.inf_get_main, ida_ida.inf_set_main),
    INF_MARGIN : (ida_ida.inf_get_margin, ida_ida.inf_set_margin),
    INF_MAXREF : (ida_ida.inf_get_maxref, ida_ida.inf_set_maxref),
    INF_MAX_AUTONAME_LEN : (ida_ida.inf_get_max_autoname_len, ida_ida.inf_set_max_autoname_len),
    INF_MAX_EA : (ida_ida.inf_get_max_ea, ida_ida.inf_set_max_ea),
    INF_MIN_EA : (ida_ida.inf_get_min_ea, ida_ida.inf_set_min_ea),
    INF_MODEL : (ida_ida.inf_get_cc_cm, ida_ida.inf_set_cc_cm),
    INF_NAMETYPE : (ida_ida.inf_get_nametype, ida_ida.inf_set_nametype),
    INF_NETDELTA : (ida_ida.inf_get_netdelta, ida_ida.inf_set_netdelta),
    INF_OMAX_EA : (ida_ida.inf_get_omax_ea, ida_ida.inf_set_omax_ea),
    INF_OMIN_EA : (ida_ida.inf_get_omin_ea, ida_ida.inf_set_omin_ea),
    INF_OSTYPE : (ida_ida.inf_get_ostype, ida_ida.inf_set_ostype),
    INF_OUTFLAGS : (ida_ida.inf_get_outflags, ida_ida.inf_set_outflags),
    INF_PREFFLAG : (ida_ida.inf_get_prefflag, ida_ida.inf_set_prefflag),
    INF_PRIVRANGE_END_EA : (ida_ida.inf_get_privrange_end_ea, ida_ida.inf_set_privrange_end_ea),
    INF_PRIVRANGE_START_EA : (ida_ida.inf_get_privrange_start_ea, ida_ida.inf_set_privrange_start_ea),
    INF_PROCNAME : (ida_ida.inf_get_procname, ida_ida.inf_set_procname),
    INF_REFCMTNUM : (ida_ida.inf_get_refcmtnum, ida_ida.inf_set_refcmtnum),
    INF_SHORT_DEMNAMES : (ida_ida.inf_get_short_demnames, ida_ida.inf_set_short_demnames),
    INF_SPECSEGS : (ida_ida.inf_get_specsegs, ida_ida.inf_set_specsegs),
    INF_START_CS : (ida_ida.inf_get_start_cs, ida_ida.inf_set_start_cs),
    INF_START_EA : (ida_ida.inf_get_start_ea, ida_ida.inf_set_start_ea),
    INF_START_IP : (ida_ida.inf_get_start_ip, ida_ida.inf_set_start_ip),
    INF_START_PRIVRANGE : (ida_ida.inf_get_privrange_start_ea, ida_ida.inf_set_privrange_start_ea),
    INF_START_SP : (ida_ida.inf_get_start_sp, ida_ida.inf_set_start_sp),
    INF_START_SS : (ida_ida.inf_get_start_ss, ida_ida.inf_set_start_ss),
    INF_STRLIT_BREAK : (ida_ida.inf_get_strlit_break, ida_ida.inf_set_strlit_break),
    INF_STRLIT_FLAGS : (ida_ida.inf_get_strlit_flags, ida_ida.inf_set_strlit_flags),
    INF_STRLIT_PREF : (ida_ida.inf_get_strlit_pref, ida_ida.inf_set_strlit_pref),
    INF_STRLIT_SERNUM : (ida_ida.inf_get_strlit_sernum, ida_ida.inf_set_strlit_sernum),
    INF_STRLIT_ZEROES : (ida_ida.inf_get_strlit_zeroes, ida_ida.inf_set_strlit_zeroes),
    INF_STRTYPE : (ida_ida.inf_get_strtype, ida_ida.inf_set_strtype),
    INF_TYPE_XREFNUM : (ida_ida.inf_get_type_xrefnum, ida_ida.inf_set_type_xrefnum),
    INF_VERSION : (ida_ida.inf_get_version, ida_ida.inf_set_version),
    INF_XREFFLAG : (ida_ida.inf_get_xrefflag, ida_ida.inf_set_xrefflag),
    INF_XREFNUM : (ida_ida.inf_get_xrefnum, ida_ida.inf_set_xrefnum),
}
def get_inf_attr(attr):
    """
    Deprecated. Please ida_ida.inf_get_* instead.
    """
    getter, _setter = _INF_attrs_accessors[attr]
    return getter()
def set_inf_attr(attr, value):
    """
    Deprecated. Please ida_ida.inf_set_* instead.
    """
    _getter, setter = _INF_attrs_accessors[attr]
    setter(value)
    # Historical contract: always report success.
    return 1
# Processor selection: function and level constants re-exported from ida_idp.
set_processor_type = ida_idp.set_processor_type
SETPROC_IDB = ida_idp.SETPROC_IDB
SETPROC_LOADER = ida_idp.SETPROC_LOADER
SETPROC_LOADER_NON_FATAL = ida_idp.SETPROC_LOADER_NON_FATAL
SETPROC_USER = ida_idp.SETPROC_USER
def SetPrcsr(processor):
    # Legacy alias: switch processors as a user-initiated action.
    return set_processor_type(processor, SETPROC_USER)
set_target_assembler = ida_idp.set_target_assembler  # re-export: choose output assembler
def batch(batch):
    """
    Enable or disable batch mode of operation.

    @param batch: 0 - IDA displays dialog boxes and waits for user input;
                  1 - IDA suppresses dialog boxes, warnings, etc.
    @return: previous value of the batch flag
    """
    previous = ida_kernwin.cvar.batch
    ida_kernwin.cvar.batch = batch
    return previous
#----------------------------------------------------------------------------
# I N T E R A C T I O N W I T H T H E U S E R
#----------------------------------------------------------------------------
def process_ui_action(name, flags=0):
    """
    Invoke an IDA UI action by name.

    @param name: command name
    @param flags: reserved, must be zero
    @return: Boolean
    """
    return ida_kernwin.process_ui_action(name, flags)
# User-interaction re-exports from ida_kernwin / ida_auto.
ask_seg = ida_kernwin.ask_seg
ask_yn = ida_kernwin.ask_yn
msg = ida_kernwin.msg
warning = ida_kernwin.warning
error = ida_kernwin.error
set_ida_state = ida_auto.set_ida_state
# Indicator states accepted by set_ida_state().
IDA_STATUS_READY = 0 # READY IDA is idle
IDA_STATUS_THINKING = 1 # THINKING Analyzing but the user may press keys
IDA_STATUS_WAITING = 2 # WAITING Waiting for the user input
IDA_STATUS_WORK = 3 # BUSY IDA is busy
refresh_idaview_anyway = ida_kernwin.refresh_idaview_anyway
refresh_lists = ida_kernwin.refresh_choosers
#----------------------------------------------------------------------------
# S E G M E N T A T I O N
#----------------------------------------------------------------------------
def sel2para(sel):
    """
    Return the value of a selector.

    @param sel: the selector number
    @return: the selector value if found, otherwise the input value (sel)
    @note: selector values are always in paragraphs
    """
    sel_ptr = ida_pro.sel_pointer()
    base_ptr = ida_pro.ea_pointer()
    ok, _ = ida_segment.getn_selector(sel, sel_ptr.cast(), base_ptr.cast())
    # On failure, fall back to the selector number itself.
    return base_ptr.value() if ok else sel
def find_selector(val):
    """
    Find the selector that has the specified value.

    @param val: value to search for
    @return: the selector number if found, otherwise the input
             value (val & 0xFFFF)
    @note: selector values are always in paragraphs
    """
    # Mask to 16 bits to match the historical return contract.
    return ida_segment.find_selector(val) & 0xFFFF
# Selector management re-exports from ida_segment.
set_selector = ida_segment.set_selector
del_selector = ida_segment.del_selector
def get_first_seg():
    """
    Return the start address of the first segment.

    @return: BADADDR when no segments are defined
    """
    seg = ida_segment.get_first_seg()
    return seg.start_ea if seg else BADADDR
def get_next_seg(ea):
    """
    Return the start address of the segment following `ea`.

    @param ea: linear address
    @return: BADADDR when there is no next segment
    """
    seg = ida_segment.get_next_seg(ea)
    return seg.start_ea if seg else BADADDR
def get_segm_start(ea):
    """
    Return the start address of the segment containing `ea`.

    @param ea: any address in the segment
    @return: BADADDR when the address belongs to no segment
    """
    seg = ida_segment.getseg(ea)
    return seg.start_ea if seg else BADADDR
def get_segm_end(ea):
    """
    Return the end address of the segment containing `ea`
    (the first address past the segment).

    @param ea: any address in the segment
    @return: BADADDR when the address belongs to no segment
    """
    seg = ida_segment.getseg(ea)
    return seg.end_ea if seg else BADADDR
def get_segm_name(ea):
    """
    Return the name of the segment containing `ea`.

    @param ea: any address in the segment
    @return: "" when no segment exists at the address or it has no name
    """
    seg = ida_segment.getseg(ea)
    if not seg:
        return ""
    # Normalize a missing name to the empty string.
    return ida_segment.get_segm_name(seg) or ""
def add_segm_ex(startea, endea, base, use32, align, comb, flags):
    """
    Create a new segment.

    @param startea: linear address of the start of the segment
    @param endea: linear address of the end of the segment; this address
                  does not belong to the segment and must be higher
                  than startea
    @param base: base paragraph or selector of the segment (a paragraph
                 is a 16-byte memory chunk); a selector value must
                 already be defined
    @param use32: 0: 16bit segment, 1: 32bit segment, 2: 64bit segment
    @param align: segment alignment (see the alignment constants)
    @param comb: segment combination (see the combination constants)
    @param flags: combination of ADDSEG_... bits
    @return: 0-failed, 1-ok
    """
    seg = ida_segment.segment_t()
    seg.start_ea = startea
    seg.end_ea = endea
    seg.sel = ida_segment.setup_selector(base)
    seg.bitness = use32
    seg.align = align
    seg.comb = comb
    # Empty name and class: callers name the segment separately if needed.
    return ida_segment.add_segm_ex(seg, "", "", flags)
# Flag bits accepted in the 'flags' argument of add_segm_ex()/AddSeg():
ADDSEG_NOSREG = ida_segment.ADDSEG_NOSREG # set all default segment register values
# to BADSELs
# (undefine all default segment registers)
ADDSEG_OR_DIE = ida_segment. ADDSEG_OR_DIE # qexit() if can't add a segment
ADDSEG_NOTRUNC = ida_segment.ADDSEG_NOTRUNC # don't truncate the new segment at the beginning
# of the next segment if they overlap.
# destroy/truncate old segments instead.
ADDSEG_QUIET = ida_segment.ADDSEG_QUIET # silent mode, no "Adding segment..." in the messages window
ADDSEG_FILLGAP = ida_segment.ADDSEG_FILLGAP # If there is a gap between the new segment
# and the previous one, and this gap is less
# than 64K, then fill the gap by extending the
# previous segment and adding .align directive
# to it. This way we avoid gaps between segments.
# Too many gaps lead to a virtual array failure.
# It cannot hold more than ~1000 gaps.
ADDSEG_SPARSE = ida_segment.ADDSEG_SPARSE # Use sparse storage method for the new segment
def AddSeg(startea, endea, base, use32, align, comb):
    # Legacy convenience wrapper: create a segment with all default segment
    # registers undefined (ADDSEG_NOSREG). See add_segm_ex() for parameters.
    return add_segm_ex(startea, endea, base, use32, align, comb, ADDSEG_NOSREG)
del_segm = ida_segment.del_segm
# SEGMOD_... flags control what happens to existing information when
# segment boundaries are changed (see set_segment_bounds()):
SEGMOD_KILL = ida_segment.SEGMOD_KILL # disable addresses if segment gets
# shrinked or deleted
SEGMOD_KEEP = ida_segment.SEGMOD_KEEP # keep information (code & data, etc)
SEGMOD_SILENT = ida_segment.SEGMOD_SILENT # be silent
def set_segment_bounds(ea, startea, endea, flags):
    """Change the boundaries of the segment containing *ea*.

    @param ea:      any address in the segment
    @param startea: new start address of the segment
    @param endea:   new end address of the segment
    @param flags:   combination of SEGMOD_... flags
    @return: boolean success
    """
    # Bitwise & (not short-circuit `and`) so that both the start and the
    # end are always updated, even if the first call fails.
    moved_start = ida_segment.set_segm_start(ea, startea, flags)
    moved_end = ida_segment.set_segm_end(ea, endea, flags)
    return moved_start & moved_end
def set_segm_name(ea, name):
    """Rename the segment containing *ea*.

    @param ea:   any address in the segment
    @param name: new name of the segment
    @return: success (boolean)
    """
    seg = ida_segment.getseg(ea)
    return ida_segment.set_segm_name(seg, name) if seg else False
def set_segm_class(ea, segclass):
    """Change the class of the segment containing *ea*.

    @param ea:       any address in the segment
    @param segclass: new class of the segment
    @return: success (boolean)
    """
    seg = ida_segment.getseg(ea)
    return ida_segment.set_segm_class(seg, segclass) if seg else False
def set_segm_alignment(ea, alignment):
    """
    Change alignment of the segment

    @param ea: any address in the segment
    @param alignment: new alignment of the segment (one of the sa... constants)
    @return: success (boolean)
    """
    # Alignment is stored as an ordinary segment attribute.
    return set_segm_attr(ea, SEGATTR_ALIGN, alignment)
# Pick the scope hosting the segment alignment/combination constants: with
# SWiG builtins they live on the segment_t class, otherwise on the module.
if ida_idaapi.uses_swig_builtins:
    _scope = ida_segment.segment_t
else:
    _scope = ida_segment
# Segment alignment values (for set_segm_alignment()):
saAbs = _scope.saAbs # Absolute segment.
saRelByte = _scope.saRelByte # Relocatable, byte aligned.
saRelWord = _scope.saRelWord # Relocatable, word (2-byte, 16-bit) aligned.
saRelPara = _scope.saRelPara # Relocatable, paragraph (16-byte) aligned.
saRelPage = _scope.saRelPage # Relocatable, aligned on 256-byte boundary
# (a "page" in the original Intel specification).
saRelDble = _scope.saRelDble # Relocatable, aligned on a double word
# (4-byte) boundary. This value is used by
# the PharLap OMF for the same alignment.
saRel4K = _scope.saRel4K # This value is used by the PharLap OMF for
# page (4K) alignment. It is not supported
# by LINK.
saGroup = _scope.saGroup # Segment group
saRel32Bytes = _scope.saRel32Bytes # 32 bytes
saRel64Bytes = _scope.saRel64Bytes # 64 bytes
saRelQword = _scope.saRelQword # 8 bytes
def set_segm_combination(segea, comb):
    """
    Change combination of the segment

    @param segea: any address in the segment
    @param comb: new combination of the segment (one of the sc... constants)
    @return: success (boolean)
    """
    # Combination is stored as an ordinary segment attribute.
    return set_segm_attr(segea, SEGATTR_COMB, comb)
# Segment combination values (for set_segm_combination()):
scPriv = _scope.scPriv # Private. Do not combine with any other program
# segment.
scPub = _scope.scPub # Public. Combine by appending at an offset that
# meets the alignment requirement.
scPub2 = _scope.scPub2 # As defined by Microsoft, same as C=2 (public).
scStack = _scope.scStack # Stack. Combine as for C=2. This combine type
# forces byte alignment.
scCommon = _scope.scCommon # Common. Combine by overlay using maximum size.
scPub3 = _scope.scPub3 # As defined by Microsoft, same as C=2 (public).
def set_segm_addressing(ea, bitness):
    """Change the addressing mode (bitness) of the segment containing *ea*.

    @param ea:      any address in the segment
    @param bitness: 0: 16bit, 1: 32bit, 2: 64bit
    @return: success (boolean)
    """
    seg = ida_segment.getseg(ea)
    if seg:
        seg.bitness = bitness
        return True
    return False
def selector_by_name(segname):
    """Return the selector of the segment named *segname*.

    @param segname: name of segment
    @return: segment selector, or BADADDR when no such segment exists
    """
    seg = ida_segment.get_segm_by_name(segname)
    return seg.sel if seg else BADADDR
def set_default_sreg_value(ea, reg, value):
    """Set the default segment register value for a segment.

    @param ea:    any address in the segment; if no segment is present at
                  the specified address then all segments will be affected
    @param reg:   name of the segment register
    @param value: default value of the segment register; -1 = undefined
    @return: success (boolean)
    """
    seg = ida_segment.getseg(ea)
    regno = ida_idp.str2reg(reg)
    if not seg or regno < 0:
        return False
    return ida_segregs.set_default_sreg_value(seg, regno, value)
def set_segm_type(segea, segtype):
    """Set the type of the segment containing *segea*.

    @param segea:   any address within the segment
    @param segtype: new segment type (one of the SEG_... constants)
    @return: !=0 - ok
    """
    seg = ida_segment.getseg(segea)
    if seg:
        seg.type = segtype
        return seg.update()
    return False
# Segment type values (for set_segm_type()); entries marked '*' forbid
# instructions inside the segment:
SEG_NORM = _scope.SEG_NORM
SEG_XTRN = _scope.SEG_XTRN # * segment with 'extern' definitions
# no instructions are allowed
SEG_CODE = _scope.SEG_CODE # pure code segment
SEG_DATA = _scope.SEG_DATA # pure data segment
SEG_IMP = _scope.SEG_IMP # implementation segment
SEG_GRP = _scope.SEG_GRP # * group of segments
# no instructions are allowed
SEG_NULL = _scope.SEG_NULL # zero-length segment
SEG_UNDF = _scope.SEG_UNDF # undefined segment type
SEG_BSS = _scope.SEG_BSS # uninitialized segment
SEG_ABSSYM = _scope.SEG_ABSSYM # * segment with definitions of absolute symbols
# no instructions are allowed
SEG_COMM = _scope.SEG_COMM # * segment with communal definitions
# no instructions are allowed
SEG_IMEM = _scope.SEG_IMEM # internal processor memory & sfr (8051)
def get_segm_attr(segea, attr):
    """Read a segment attribute.

    @param segea: any address within the segment
    @param attr:  one of the SEGATTR_... constants
    """
    seg = ida_segment.getseg(segea)
    assert seg, "could not find segment at 0x%x" % segea
    # Default segment registers live apart from plain segment attributes.
    if attr in (SEGATTR_ES, SEGATTR_CS, SEGATTR_SS, SEGATTR_DS, SEGATTR_FS, SEGATTR_GS):
        return ida_segment.get_defsr(seg, _SEGATTRMAP[attr][1])
    return _IDC_GetAttr(seg, _SEGATTRMAP, attr)
def set_segm_attr(segea, attr, value):
    """Write a segment attribute.

    @param segea: any address within the segment
    @param attr:  one of the SEGATTR_... constants

    @note: not every segment attribute is modifiable, and some should be
           changed through dedicated helpers such as set_segm_addressing().
    """
    seg = ida_segment.getseg(segea)
    assert seg, "could not find segment at 0x%x" % segea
    # Default segment registers live apart from plain segment attributes.
    if attr in (SEGATTR_ES, SEGATTR_CS, SEGATTR_SS, SEGATTR_DS, SEGATTR_FS, SEGATTR_GS):
        ida_segment.set_defsr(seg, _SEGATTRMAP[attr][1], value)
    else:
        _IDC_SetAttr(seg, _SEGATTRMAP, attr, value)
    return seg.update()
# Byte offsets of segment attributes (32-bit layout); used as keys into
# _SEGATTRMAP below by get_segm_attr()/set_segm_attr().
SEGATTR_START = 0 # starting address
SEGATTR_END = 4 # ending address
SEGATTR_ORGBASE = 16
SEGATTR_ALIGN = 20 # alignment
SEGATTR_COMB = 21 # combination
SEGATTR_PERM = 22 # permissions
SEGATTR_BITNESS = 23 # bitness (0: 16, 1: 32, 2: 64 bit segment)
# Note: modifying the attribute directly does
# not lead to the reanalysis of the segment.
# Using set_segm_addressing() is more correct.
SEGATTR_FLAGS = 24 # segment flags
SEGATTR_SEL = 28 # segment selector
SEGATTR_ES = 32 # default ES value
SEGATTR_CS = 36 # default CS value
SEGATTR_SS = 40 # default SS value
SEGATTR_DS = 44 # default DS value
SEGATTR_FS = 48 # default FS value
SEGATTR_GS = 52 # default GS value
SEGATTR_TYPE = 96 # segment type
SEGATTR_COLOR = 100 # segment color
# Redefining these for 64-bit
if __EA64__:
    SEGATTR_START = 0
    SEGATTR_END = 8
    SEGATTR_ORGBASE = 32
    SEGATTR_ALIGN = 40
    SEGATTR_COMB = 41
    SEGATTR_PERM = 42
    SEGATTR_BITNESS = 43
    SEGATTR_FLAGS = 44
    SEGATTR_SEL = 48
    SEGATTR_ES = 56
    SEGATTR_CS = 64
    SEGATTR_SS = 72
    SEGATTR_DS = 80
    SEGATTR_FS = 88
    SEGATTR_GS = 96
    SEGATTR_TYPE = 184
    SEGATTR_COLOR = 188
# Maps a SEGATTR_... constant to (readonly?, segment_t attribute name or
# default-sreg index). The sreg entries (ES..GS) carry integer indices.
_SEGATTRMAP = {
    SEGATTR_START : (True, 'start_ea'),
    SEGATTR_END : (True, 'end_ea'),
    SEGATTR_ORGBASE : (False, 'orgbase'),
    SEGATTR_ALIGN : (False, 'align'),
    SEGATTR_COMB : (False, 'comb'),
    SEGATTR_PERM : (False, 'perm'),
    SEGATTR_BITNESS : (False, 'bitness'),
    SEGATTR_FLAGS : (False, 'flags'),
    SEGATTR_SEL : (False, 'sel'),
    SEGATTR_ES : (False, 0),
    SEGATTR_CS : (False, 1),
    SEGATTR_SS : (False, 2),
    SEGATTR_DS : (False, 3),
    SEGATTR_FS : (False, 4),
    SEGATTR_GS : (False, 5),
    SEGATTR_TYPE : (False, 'type'),
    SEGATTR_COLOR : (False, 'color'),
}
# Valid segment flags
SFL_COMORG = 0x01 # IDP dependent field (IBM PC: if set, ORG directive is not commented out)
SFL_OBOK = 0x02 # orgbase is present? (IDP dependent field)
SFL_HIDDEN = 0x04 # is the segment hidden?
SFL_DEBUG = 0x08 # is the segment created for the debugger?
SFL_LOADER = 0x10 # is the segment created by the loader?
SFL_HIDETYPE = 0x20 # hide segment type (do not print it in the listing)
def move_segm(ea, to, flags):
    """Move a segment to a new address.

    All information is moved and address-sensitive information in the
    kernel is fixed up; the net effect equals reloading the segment at
    the target address.

    @param ea:    any address within the segment to move
    @param to:    new segment start address
    @param flags: combination of MSF_... constants
    @returns: MOVE_SEGM_... error code
    """
    seg = ida_segment.getseg(ea)
    return ida_segment.move_segm(seg, to, flags) if seg else MOVE_SEGM_PARAM
# Flags for move_segm()/rebase_program():
MSF_SILENT = 0x0001 # don't display a "please wait" box on the screen
MSF_NOFIX = 0x0002 # don't call the loader to fix relocations
MSF_LDKEEP = 0x0004 # keep the loader in the memory (optimization)
MSF_FIXONCE = 0x0008 # valid for rebase_program(): call loader only once
# Return codes of move_segm():
MOVE_SEGM_OK = 0 # all ok
MOVE_SEGM_PARAM = -1 # The specified segment does not exist
MOVE_SEGM_ROOM = -2 # Not enough free room at the target address
MOVE_SEGM_IDP = -3 # IDP module forbids moving the segment
MOVE_SEGM_CHUNK = -4 # Too many chunks are defined, can't move
MOVE_SEGM_LOADER = -5 # The segment has been moved but the loader complained
MOVE_SEGM_ODD = -6 # Can't move segments by an odd number of bytes
rebase_program = ida_segment.rebase_program
set_storage_type = ida_bytes.change_storage_type
# Storage types for set_storage_type():
STT_VA = 0 # regular storage: virtual arrays, an explicit flag for each byte
STT_MM = 1 # memory map: sparse storage. useful for huge objects
#----------------------------------------------------------------------------
# C R O S S R E F E R E N C E S
#----------------------------------------------------------------------------
# Flow types (combine with XREF_USER!):
fl_CF = 16 # Call Far
fl_CN = 17 # Call Near
fl_JF = 18 # jumpto Far
fl_JN = 19 # jumpto Near
fl_F = 21 # Ordinary flow
XREF_USER = 32 # All user-specified xref types
# must be combined with this bit
# Mark exec flow 'from' 'to'
add_cref = ida_xref.add_cref
del_cref = ida_xref.del_cref
# The following functions include the ordinary flows:
# (the ordinary flow references are returned first)
get_first_cref_from = ida_xref.get_first_cref_from
get_next_cref_from = ida_xref.get_next_cref_from
get_first_cref_to = ida_xref.get_first_cref_to
get_next_cref_to = ida_xref.get_next_cref_to
# The following functions don't take into account the ordinary flows:
get_first_fcref_from = ida_xref.get_first_fcref_from
get_next_fcref_from = ida_xref.get_next_fcref_from
get_first_fcref_to = ida_xref.get_first_fcref_to
get_next_fcref_to = ida_xref.get_next_fcref_to
# Data reference types (combine with XREF_USER!):
dr_O = ida_xref.dr_O # Offset
dr_W = ida_xref.dr_W # Write
dr_R = ida_xref.dr_R # Read
dr_T = ida_xref.dr_T # Text (names in manual operands)
dr_I = ida_xref.dr_I # Informational
# Data cross-reference manipulation, re-exported from ida_xref:
add_dref = ida_xref.add_dref
del_dref = ida_xref.del_dref
get_first_dref_from = ida_xref.get_first_dref_from
get_next_dref_from = ida_xref.get_next_dref_from
get_first_dref_to = ida_xref.get_first_dref_to
get_next_dref_to = ida_xref.get_next_dref_to
def get_xref_type():
    """
    Return type of the last xref obtained by
    [RD]first/next[B0] functions.

    @return: constants fl_* or dr_*
    """
    # Kept for source compatibility only; this always raises.
    raise DeprecatedIDCError("use XrefsFrom() XrefsTo() from idautils instead.")
#----------------------------------------------------------------------------
# F I L E I / O
#----------------------------------------------------------------------------
# Deprecated C-style file I/O shims; each one unconditionally raises.
def fopen(f, mode):
    raise DeprecatedIDCError("fopen() deprecated. Use Python file objects instead.")
def fclose(handle):
    raise DeprecatedIDCError("fclose() deprecated. Use Python file objects instead.")
def filelength(handle):
    raise DeprecatedIDCError("filelength() deprecated. Use Python file objects instead.")
def fseek(handle, offset, origin):
    raise DeprecatedIDCError("fseek() deprecated. Use Python file objects instead.")
def ftell(handle):
    raise DeprecatedIDCError("ftell() deprecated. Use Python file objects instead.")
def LoadFile(filepath, pos, ea, size):
    """Load part of a file into the IDA database.

    @param filepath: path to the input file
    @param pos:      position in the file
    @param ea:       linear address to load to
    @param size:     number of bytes to load
    @return: 0 - error, 1 - ok
    """
    li = ida_diskio.open_linput(filepath, False)
    if not li:
        return 0
    result = ida_loader.file2base(li, pos, ea, ea + size, False)
    ida_diskio.close_linput(li)
    return result
# Lower-case compatibility alias for LoadFile().
def loadfile(filepath, pos, ea, size): return LoadFile(filepath, pos, ea, size)
def SaveFile(filepath, pos, ea, size):
    """Save a range of the IDA database to a file.

    @param filepath: path to the output file
    @param pos:      position in the file
    @param ea:       linear address to save from
    @param size:     number of bytes to save
    @return: 0 - error, 1 - ok
    """
    # Append into an existing file (fopenM), otherwise create it (fopenWB).
    opener = ida_diskio.fopenM if os.path.isfile(filepath) else ida_diskio.fopenWB
    of = opener(filepath)
    if not of:
        return 0
    result = ida_loader.base2file(of, pos, ea, ea + size)
    ida_diskio.eclose(of)
    return result
# Lower-case compatibility alias for SaveFile().
def savefile(filepath, pos, ea, size): return SaveFile(filepath, pos, ea, size)
# More deprecated C-style file I/O shims; each one unconditionally raises.
def fgetc(handle):
    raise DeprecatedIDCError("fgetc() deprecated. Use Python file objects instead.")
def fputc(byte, handle):
    raise DeprecatedIDCError("fputc() deprecated. Use Python file objects instead.")
def fprintf(handle, format, *args):
    raise DeprecatedIDCError("fprintf() deprecated. Use Python file objects instead.")
def readshort(handle, mostfirst):
    raise DeprecatedIDCError("readshort() deprecated. Use Python file objects instead.")
def readlong(handle, mostfirst):
    raise DeprecatedIDCError("readlong() deprecated. Use Python file objects instead.")
def writeshort(handle, word, mostfirst):
    raise DeprecatedIDCError("writeshort() deprecated. Use Python file objects instead.")
def writelong(handle, dword, mostfirst):
    raise DeprecatedIDCError("writelong() deprecated. Use Python file objects instead.")
def readstr(handle):
    raise DeprecatedIDCError("readstr() deprecated. Use Python file objects instead.")
def writestr(handle, s):
    raise DeprecatedIDCError("writestr() deprecated. Use Python file objects instead.")
# ----------------------------------------------------------------------------
# F U N C T I O N S
# ----------------------------------------------------------------------------
# Function creation/deletion, re-exported from ida_funcs:
add_func = ida_funcs.add_func
del_func = ida_funcs.del_func
set_func_end = ida_funcs.set_func_end
def get_next_func(ea):
    """Return the start address of the function following *ea*.

    @param ea: any address belonging to the function
    @return: start address of the next function, or BADADDR when there
             are no more functions
    """
    nxt = ida_funcs.get_next_func(ea)
    return nxt.start_ea if nxt else BADADDR
def get_prev_func(ea):
    """Return the start address of the function preceding *ea*.

    @param ea: any address belonging to the function
    @return: start address of the previous function, or BADADDR when
             there are no more functions
    """
    prev = ida_funcs.get_prev_func(ea)
    return prev.start_ea if prev else BADADDR
def get_func_attr(ea, attr):
    """Read an attribute of the function containing *ea*.

    @param ea:   any address belonging to the function
    @param attr: one of the FUNCATTR_... constants
    @return: the attribute value, or BADADDR when there is no function
    """
    func = ida_funcs.get_func(ea)
    if not func:
        return BADADDR
    return _IDC_GetAttr(func, _FUNCATTRMAP, attr)
def set_func_attr(ea, attr, value):
    """Write an attribute of the function containing *ea*.

    @param ea:    any address belonging to the function
    @param attr:  one of the FUNCATTR_... constants
    @param value: new value of the attribute
    @return: 1-ok, 0-failed
    """
    func = ida_funcs.get_func(ea)
    if not func:
        return 0
    _IDC_SetAttr(func, _FUNCATTRMAP, attr, value)
    return ida_funcs.update_func(func)
# Byte offsets of function attributes (32-bit layout); used as keys into
# _FUNCATTRMAP below by get_func_attr()/set_func_attr().
FUNCATTR_START = 0 # readonly: function start address
FUNCATTR_END = 4 # readonly: function end address
FUNCATTR_FLAGS = 8 # function flags
FUNCATTR_FRAME = 16 # readonly: function frame id
FUNCATTR_FRSIZE = 20 # readonly: size of local variables
FUNCATTR_FRREGS = 24 # readonly: size of saved registers area
FUNCATTR_ARGSIZE = 28 # readonly: number of bytes purged from the stack
FUNCATTR_FPD = 32 # frame pointer delta
FUNCATTR_COLOR = 36 # function color code
FUNCATTR_OWNER = 16 # readonly: chunk owner (valid only for tail chunks)
FUNCATTR_REFQTY = 20 # readonly: number of chunk parents (valid only for tail chunks)
# Redefining the constants for ea64
if __EA64__:
    FUNCATTR_START = 0
    FUNCATTR_END = 8
    FUNCATTR_FLAGS = 16
    FUNCATTR_FRAME = 24
    FUNCATTR_FRSIZE = 32
    FUNCATTR_FRREGS = 40
    FUNCATTR_ARGSIZE = 48
    FUNCATTR_FPD = 56
    FUNCATTR_COLOR = 64
    FUNCATTR_OWNER = 24
    FUNCATTR_REFQTY = 32
# Maps a FUNCATTR_... constant to (readonly?, func_t attribute name).
_FUNCATTRMAP = {
    FUNCATTR_START : (True, 'start_ea'),
    FUNCATTR_END : (True, 'end_ea'),
    FUNCATTR_FLAGS : (False, 'flags'),
    FUNCATTR_FRAME : (True, 'frame'),
    FUNCATTR_FRSIZE : (True, 'frsize'),
    FUNCATTR_FRREGS : (True, 'frregs'),
    FUNCATTR_ARGSIZE : (True, 'argsize'),
    FUNCATTR_FPD : (False, 'fpd'),
    FUNCATTR_COLOR : (False, 'color'),
    FUNCATTR_OWNER : (True, 'owner'),
    FUNCATTR_REFQTY : (True, 'refqty')
}
def get_func_flags(ea):
    """Return the flags of the function containing *ea*.

    @param ea: any address belonging to the function
    @return: the function flags, or -1 when there is no function
    """
    func = ida_funcs.get_func(ea)
    return func.flags if func else -1
# Pick the scope hosting the function flag constants: with SWiG builtins
# they live on the func_t class, otherwise on the module.
if ida_idaapi.uses_swig_builtins:
    _scope = ida_funcs.func_t
else:
    _scope = ida_funcs
# Function flag bits (see get_func_flags()/set_func_flags()):
FUNC_NORET = _scope.FUNC_NORET # function doesn't return
FUNC_FAR = _scope.FUNC_FAR # far function
FUNC_LIB = _scope.FUNC_LIB # library function
FUNC_STATIC = _scope.FUNC_STATICDEF # static function
FUNC_FRAME = _scope.FUNC_FRAME # function uses frame pointer (BP)
FUNC_USERFAR = _scope.FUNC_USERFAR # user has specified far-ness
# of the function
FUNC_HIDDEN = _scope.FUNC_HIDDEN # a hidden function
FUNC_THUNK = _scope.FUNC_THUNK # thunk (jump) function
FUNC_BOTTOMBP = _scope.FUNC_BOTTOMBP # BP points to the bottom of the stack frame
FUNC_NORET_PENDING = _scope.FUNC_NORET_PENDING # Function 'non-return' analysis
# must be performed. This flag is
# verified upon func_does_return()
FUNC_SP_READY = _scope.FUNC_SP_READY # SP-analysis has been performed
# If this flag is on, the stack
# change points should not be not
# modified anymore. Currently this
# analysis is performed only for PC
FUNC_PURGED_OK = _scope.FUNC_PURGED_OK # 'argsize' field has been validated.
# If this bit is clear and 'argsize'
# is 0, then we do not known the real
# number of bytes removed from
# the stack. This bit is handled
# by the processor module.
FUNC_TAIL = _scope.FUNC_TAIL # This is a function tail.
# Other bits must be clear
# (except FUNC_HIDDEN)
def set_func_flags(ea, flags):
    """Replace the flags of the function containing *ea*.

    @param ea:    any address belonging to the function
    @param flags: see get_func_flags() for explanations
    @return: !=0 - ok
    """
    func = ida_funcs.get_func(ea)
    if not func:
        return 0
    func.flags = flags
    ida_funcs.update_func(func)
    return 1
def get_func_name(ea):
    """Return the name of the function containing *ea*.

    @param ea: any address belonging to the function
    @return: the function name, or "" when there is no function
    """
    return ida_funcs.get_func_name(ea) or ""
def get_func_cmt(ea, repeatable):
    """Return the comment of the function containing *ea*.

    @param ea:         any address belonging to the function
    @param repeatable: 1: get repeatable comment, 0: get regular comment
    @return: the function comment, or "" when there is no function
             or no comment
    """
    func = ida_funcs.get_func(ea)
    if not func:
        return ""
    return ida_funcs.get_func_cmt(func, repeatable) or ""
def set_func_cmt(ea, cmt, repeatable):
    """Set the comment of the function containing *ea*.

    @param ea:         any address belonging to the function
    @param cmt:        a function comment line
    @param repeatable: 1: set repeatable comment, 0: set regular comment
    @return: result of ida_funcs.set_func_cmt(), or None when there
             is no function at *ea*
    """
    func = ida_funcs.get_func(ea)
    return ida_funcs.set_func_cmt(func, cmt, repeatable) if func else None
def choose_func(title):
    """Interactively ask the user to pick a function.

    @param title: title of the dialog box
    @return: start address of the selected function, or BADADDR when
             the user refused to select one
    """
    chosen = ida_kernwin.choose_func(title, ida_idaapi.BADADDR)
    if chosen is None:
        return BADADDR
    return chosen.start_ea
def get_func_off_str(ea):
    """Convert an address to a 'funcname+offset' string.

    @param ea: address to convert
    @return: 'name+offset' if *ea* belongs to a function, where 'name'
             is the function name and 'offset' the offset within it;
             otherwise a null string
    """
    return ida_name.get_nice_colored_name(
            ea, ida_name.GNCN_NOCOLOR | ida_name.GNCN_REQFUNC)
def find_func_end(ea):
    """Determine the boundaries of a would-be function starting at *ea*.

    @param ea: starting address of a new function
    @return: end address of the (new or existing) function, or BADADDR
             when a function end cannot be determined
    """
    probe = ida_funcs.func_t(ea)
    outcome = ida_funcs.find_func_bounds(probe, ida_funcs.FIND_FUNC_DEFINE)
    return BADADDR if outcome == ida_funcs.FIND_FUNC_UNDEF else probe.end_ea
def get_frame_id(ea):
    """Return the ID of the frame structure of the function at *ea*.

    Use structure member manipulation functions with the obtained ID
    to access stack variables.

    @param ea: any address belonging to the function
    @return: frame structure ID, or None when there is no frame
    """
    frame = ida_frame.get_frame(ea)
    return frame.id if frame else None
def get_frame_lvar_size(ea):
    """
    Get size of local variables in function frame

    @param ea: any address belonging to the function

    @return: Size of local variables in bytes.
             If the function doesn't have a frame, return 0
             If the function doesn't exist, return BADADDR
             (the missing-function value of get_func_attr())
    """
    return get_func_attr(ea, FUNCATTR_FRSIZE)
def get_frame_regs_size(ea):
    """
    Get size of saved registers in function frame

    @param ea: any address belonging to the function

    @return: Size of saved registers in bytes.
             If the function doesn't have a frame, return 0
             This value is used as offset for BP (if FUNC_FRAME is set)
             If the function doesn't exist, return BADADDR
             (the missing-function value of get_func_attr())
    """
    return get_func_attr(ea, FUNCATTR_FRREGS)
def get_frame_args_size(ea):
    """
    Get size of arguments in function frame which are purged upon return

    @param ea: any address belonging to the function

    @return: Size of function arguments in bytes.
             If the function doesn't have a frame, return 0
             If the function doesn't exist, return BADADDR
             (the missing-function value of get_func_attr())
    """
    return get_func_attr(ea, FUNCATTR_ARGSIZE)
def get_frame_size(ea):
    """Return the full size of the frame of the function at *ea*.

    Accounts for local variables + saved registers + return address +
    function arguments. For a function without a frame, the kernel
    reports the size of the return address on the stack.

    @param ea: any address belonging to the function
    @returns: frame size in bytes, or 0 when there is no function
    """
    func = ida_funcs.get_func(ea)
    return ida_frame.get_frame_size(func) if func else 0
def set_frame_size(ea, lvsize, frregs, argsize):
    """Create or resize a function frame.

    If the function has no frame yet, one is created; otherwise the
    existing frame is resized.

    @param ea:      any address belonging to the function
    @param lvsize:  size of function local variables
    @param frregs:  size of saved registers
    @param argsize: size of function arguments
    @return: ID of the function frame, or -1 on failure
    """
    func = ida_funcs.get_func(ea)
    if func is None:
        return -1
    if not ida_frame.add_frame(func, lvsize, frregs, argsize):
        # add_frame() failed, i.e. a frame already exists: resize it.
        if not ida_frame.set_frame_size(func, lvsize, frregs, argsize):
            return -1
    return func.frame
def get_spd(ea):
    """Return the current stack pointer delta at *ea*.

    @param ea: end address of the instruction, i.e. the last address
               of the instruction + 1
    @return: difference between the original SP upon entering the
             function and SP at *ea*; None when there is no function
    """
    func = ida_funcs.get_func(ea)
    return ida_frame.get_spd(func, ea) if func else None
def get_sp_delta(ea):
    """Return the SP modification made at *ea*.

    @param ea: end address of the instruction, i.e. the last address
               of the instruction + 1
    @return: the SP delta at the location (0 when the location holds no
             SP change point); None when there is no function
    """
    func = ida_funcs.get_func(ea)
    return ida_frame.get_sp_delta(func, ea) if func else None
# ----------------------------------------------------------------------------
# S T A C K
# ----------------------------------------------------------------------------
def add_auto_stkpnt(func_ea, ea, delta):
    """Add an automatic SP register change point.

    @param func_ea: function start
    @param ea:      linear address where SP changes; usually the end of
                    the instruction modifying the stack pointer
                    (insn.ea + insn.size)
    @param delta:   difference between old and new values of SP
    @return: 1-ok, 0-failed
    """
    pfn = ida_funcs.get_func(func_ea)
    return ida_frame.add_auto_stkpnt(pfn, ea, delta) if pfn else 0
# Re-export: add a user-defined SP register change point.
add_user_stkpnt = ida_frame.add_user_stkpnt
def del_stkpnt(func_ea, ea):
    """Delete an SP register change point.

    @param func_ea: function start
    @param ea:      linear address of the change point
    @return: 1-ok, 0-failed
    """
    pfn = ida_funcs.get_func(func_ea)
    return ida_frame.del_stkpnt(pfn, ea) if pfn else 0
def get_min_spd_ea(func_ea):
    """Return the address with the minimal SP delta in a function.

    @param func_ea: function start
    @return: the address of the minimal spd; BADADDR when there is no
             such function (per ida_frame, also when there are no SP
             change points)
    """
    pfn = ida_funcs.get_func(func_ea)
    return ida_frame.get_min_spd_ea(pfn) if pfn else BADADDR
recalc_spd = ida_frame.recalc_spd
# ----------------------------------------------------------------------------
# E N T R Y P O I N T S
# ----------------------------------------------------------------------------
# Entry point API, re-exported from ida_entry:
get_entry_qty = ida_entry.get_entry_qty
add_entry = ida_entry.add_entry
get_entry_ordinal = ida_entry.get_entry_ordinal
get_entry = ida_entry.get_entry
get_entry_name = ida_entry.get_entry_name
rename_entry = ida_entry.rename_entry
# ----------------------------------------------------------------------------
# F I X U P S
# ----------------------------------------------------------------------------
# Fixup enumeration helpers, re-exported from ida_fixup:
get_next_fixup_ea = ida_fixup.get_next_fixup_ea
get_prev_fixup_ea = ida_fixup.get_prev_fixup_ea
def get_fixup_target_type(ea):
    """Return the fixup type at *ea*.

    @param ea: address to get information about
    @return: the fixup type, or 0 when there is no fixup at *ea*
    """
    fd = ida_fixup.fixup_data_t()
    return fd.get_type() if fd.get(ea) else 0
# Fixup type values (returned by get_fixup_target_type()):
FIXUP_OFF8 = 13 # 8-bit offset.
FIXUP_OFF16 = 1 # 16-bit offset.
FIXUP_SEG16 = 2 # 16-bit base--logical segment base (selector).
FIXUP_PTR32 = 3 # 32-bit long pointer (16-bit base:16-bit
# offset).
FIXUP_OFF32 = 4 # 32-bit offset.
FIXUP_PTR48 = 5 # 48-bit pointer (16-bit base:32-bit offset).
FIXUP_HI8 = 6 # high 8 bits of 16bit offset
FIXUP_HI16 = 7 # high 16 bits of 32bit offset
FIXUP_LOW8 = 8 # low 8 bits of 16bit offset
FIXUP_LOW16 = 9 # low 16 bits of 32bit offset
FIXUP_OFF64 = 12 # 64-bit offset
FIXUP_CUSTOM = 0x8000 # fixups with this bit are processed by
# processor module/plugin
def get_fixup_target_flags(ea):
    """Return the fixup target flags at *ea*.

    @param ea: address to get information about
    @return: the fixup target flags, or 0 when there is no fixup at *ea*
    """
    fd = ida_fixup.fixup_data_t()
    return fd.get_flags() if fd.get(ea) else 0
# Fixup flag bits (returned by get_fixup_target_flags()):
FIXUPF_REL = 0x1 # fixup is relative to the linear address
FIXUPF_EXTDEF = 0x2 # target is a location (otherwise - segment)
FIXUPF_UNUSED = 0x4 # fixup is ignored by IDA
FIXUPF_CREATED = 0x8 # fixup was not present in the input file
def get_fixup_target_sel(ea):
    """Return the fixup target selector at *ea*.

    @param ea: address to get information about
    @return: the target selector, or BADSEL when there is no fixup
    """
    fd = ida_fixup.fixup_data_t()
    return fd.sel if fd.get(ea) else BADSEL
def get_fixup_target_off(ea):
    """Return the fixup target offset at *ea*.

    @param ea: address to get information about
    @return: the target offset, or BADADDR when there is no fixup
    """
    fd = ida_fixup.fixup_data_t()
    return fd.off if fd.get(ea) else BADADDR
def get_fixup_target_dis(ea):
    """Return the fixup target displacement at *ea*.

    @param ea: address to get information about
    @return: the target displacement, or 0 when there is no fixup
    """
    fd = ida_fixup.fixup_data_t()
    return fd.displacement if fd.get(ea) else 0
def set_fixup(ea, fixuptype, fixupflags, targetsel, targetoff, displ):
    """Attach fixup information to *ea*.

    @param ea:         address to set fixup information for
    @param fixuptype:  fixup type; see get_fixup_target_type()
                       for possible values
    @param fixupflags: fixup flags; see get_fixup_target_flags()
                       for possible values
    @param targetsel:  target selector
    @param targetoff:  target offset
    @param displ:      displacement
    @return: none
    """
    fixup = ida_fixup.fixup_data_t(fixuptype, fixupflags)
    fixup.sel = targetsel
    fixup.off = targetoff
    fixup.displacement = displ
    fixup.set(ea)
del_fixup = ida_fixup.del_fixup
#----------------------------------------------------------------------------
# M A R K E D P O S I T I O N S
#----------------------------------------------------------------------------
# Bookmark API, re-exported from ida_idc:
put_bookmark = ida_idc.mark_position
get_bookmark = ida_idc.get_marked_pos
get_bookmark_desc = ida_idc.get_mark_comment
# ----------------------------------------------------------------------------
# S T R U C T U R E S
# ----------------------------------------------------------------------------
# Structure enumeration/lookup API, re-exported from ida_struct:
get_struc_qty = ida_struct.get_struc_qty
get_first_struc_idx = ida_struct.get_first_struc_idx
get_last_struc_idx = ida_struct.get_last_struc_idx
get_next_struc_idx = ida_struct.get_next_struc_idx
get_prev_struc_idx = ida_struct.get_prev_struc_idx
get_struc_idx = ida_struct.get_struc_idx
get_struc_by_idx = ida_struct.get_struc_by_idx
get_struc_id = ida_struct.get_struc_id
get_struc_name = ida_struct.get_struc_name
get_struc_cmt = ida_struct.get_struc_cmt
get_struc_size = ida_struct.get_struc_size
def get_member_qty(sid):
    """Return the number of members of a structure.

    @param sid: structure type ID
    @return: the member count, or -1 for a bad structure type ID

    @note: Union members are, in IDA's internals, located at subsequent
           byte offsets: member 0 -> offset 0x0, member 1 -> offset 0x1, ...
    """
    sptr = ida_struct.get_struc(sid)
    return sptr.memqty if sptr else -1
def get_member_id(sid, member_offset):
    """
    Get the member ID of a structure member.

    @param sid: structure type ID
    @param member_offset: member offset. The offset can be
                          any offset in the member. For example,
                          if a member is 4 bytes long and starts
                          at offset 2, then 2,3,4,5 denote
                          the same structure member.

    @return: -1 if bad structure type ID is passed or there is
             no member at the specified offset.
             otherwise returns the member id.
    """
    sptr = ida_struct.get_struc(sid)
    member = ida_struct.get_member(sptr, member_offset) if sptr else None
    return member.id if member else -1
def get_prev_offset(sid, offset):
    """
    Get previous offset in a structure

    @param sid: structure type ID
    @param offset: current offset

    @return: -1 if bad structure type ID is passed,
             ida_idaapi.BADADDR if no (more) offsets in the structure,
             otherwise returns previous offset in a structure.

    @note: IDA allows 'holes' between members of a structure, treated
           as unnamed arrays of bytes; this function returns a member
           offset or a hole offset. It returns the structure size if
           the input 'offset' is bigger than the structure size.

    @note: Union members are, in IDA's internals, located
           at subsequent byte offsets: member 0 -> offset 0x0,
           member 1 -> offset 0x1, etc...
    """
    sptr = ida_struct.get_struc(sid)
    return ida_struct.get_struc_prev_offset(sptr, offset) if sptr else -1
def get_next_offset(sid, offset):
    """
    Get next offset in a structure

    @param sid: structure type ID
    @param offset: current offset

    @return: -1 if bad structure type ID is passed,
             ida_idaapi.BADADDR if no (more) offsets in the structure,
             otherwise returns next offset in a structure.

    @note: IDA allows 'holes' between members of a structure, treated
           as unnamed arrays of bytes; this function returns a member
           offset or a hole offset. It returns the structure size if
           the input 'offset' belongs to the last member of the structure.

    @note: Union members are, in IDA's internals, located
           at subsequent byte offsets: member 0 -> offset 0x0,
           member 1 -> offset 0x1, etc...
    """
    sptr = ida_struct.get_struc(sid)
    if not sptr:
        return -1
    return ida_struct.get_struc_next_offset(sptr, offset)
def get_first_member(sid):
    """
    Get offset of the first member of a structure

    @param sid: structure type ID

    @return: -1 if bad structure type ID is passed,
             ida_idaapi.BADADDR if structure has no members,
             otherwise returns offset of the first member.

    @note: IDA allows 'holes' between members of a structure,
           treated as unnamed arrays of bytes.

    @note: Union members are, in IDA's internals, located
           at subsequent byte offsets: member 0 -> offset 0x0,
           member 1 -> offset 0x1, etc...
    """
    sptr = ida_struct.get_struc(sid)
    return ida_struct.get_struc_first_offset(sptr) if sptr else -1
def get_last_member(sid):
    """
    Get offset of the last member of a structure

    @param sid: structure type ID

    @return: -1 if bad structure type ID is passed,
             ida_idaapi.BADADDR if structure has no members,
             otherwise returns offset of the last member.

    @note: IDA allows 'holes' between members of a structure,
           treated as unnamed arrays of bytes.

    @note: Union members are, in IDA's internals, located
           at subsequent byte offsets: member 0 -> offset 0x0,
           member 1 -> offset 0x1, etc...
    """
    sptr = ida_struct.get_struc(sid)
    return ida_struct.get_struc_last_offset(sptr) if sptr else -1
def get_member_offset(sid, member_name):
    """
    Get offset of a member of a structure by the member name

    @param sid: structure type ID
    @param member_name: name of structure member

    @return: -1 if bad structure type ID is passed
             or no such member in the structure,
             otherwise returns offset of the specified member.

    @note: Union members are, in IDA's internals, located
           at subsequent byte offsets: member 0 -> offset 0x0,
           member 1 -> offset 0x1, etc...
    """
    sptr = ida_struct.get_struc(sid)
    member = ida_struct.get_member_by_name(sptr, member_name) if sptr else None
    return member.get_soff() if member else -1
def get_member_name(sid, member_offset):
    """
    Get name of a member of a structure

    @param sid: structure type ID
    @param member_offset: member offset. The offset can be
                          any offset in the member. For example,
                          if a member is 4 bytes long and starts
                          at offset 2, then 2,3,4,5 denote
                          the same structure member.

    @return: None if bad structure type ID is passed
             or no such member in the structure,
             otherwise returns name of the specified member.
    """
    sptr = ida_struct.get_struc(sid)
    member = ida_struct.get_member(sptr, member_offset) if sptr else None
    return ida_struct.get_member_name(member.id) if member else None
def get_member_cmt(sid, member_offset, repeatable):
    """
    Get comment of a member

    @param sid: structure type ID
    @param member_offset: member offset. The offset can be
                          any offset in the member. For example,
                          if a member is 4 bytes long and starts
                          at offset 2, then 2,3,4,5 denote
                          the same structure member.
    @param repeatable: 1: get repeatable comment
                       0: get regular comment

    @return: None if bad structure type ID is passed
             or no such member in the structure,
             otherwise returns comment of the specified member.
    """
    sptr = ida_struct.get_struc(sid)
    member = ida_struct.get_member(sptr, member_offset) if sptr else None
    return ida_struct.get_member_cmt(member.id, repeatable) if member else None
def get_member_size(sid, member_offset):
    """
    Get size of a member

    @param sid: structure type ID
    @param member_offset: member offset. The offset can be
                          any offset in the member. For example,
                          if a member is 4 bytes long and starts
                          at offset 2, then 2,3,4,5 denote
                          the same structure member.

    @return: None if bad structure type ID is passed
             or no such member in the structure,
             otherwise returns size of the specified member in bytes.
    """
    sptr = ida_struct.get_struc(sid)
    member = ida_struct.get_member(sptr, member_offset) if sptr else None
    return ida_struct.get_member_size(member) if member else None
def get_member_flag(sid, member_offset):
    """
    Get type of a member

    @param sid: structure type ID
    @param member_offset: member offset. The offset can be
                          any offset in the member. For example,
                          if a member is 4 bytes long and starts
                          at offset 2, then 2,3,4,5 denote
                          the same structure member.

    @return: -1 if bad structure type ID is passed
             or no such member in the structure,
             otherwise returns the member's flags (FF_... bits).
             If the member type is a structure, use get_member_strid()
             to obtain the structure type id.
    """
    sptr = ida_struct.get_struc(sid)
    if not sptr:
        return -1
    member = ida_struct.get_member(sptr, member_offset)
    if not member:
        return -1
    return member.flag
def get_member_strid(sid, member_offset):
    """
    Get structure id of a member

    @param sid: structure type ID
    @param member_offset: member offset. The offset can be
                          any offset in the member. For example,
                          if a member is 4 bytes long and starts
                          at offset 2, then 2,3,4,5 denote
                          the same structure member.

    @return: -1 if bad structure type ID is passed
             or no such member in the structure,
             otherwise returns structure id of the member.
             If the current member is not a structure, returns -1.
    """
    sptr = ida_struct.get_struc(sid)
    member = ida_struct.get_member(sptr, member_offset) if sptr else None
    if not member:
        return -1
    # get_sptr() yields the nested struc_t only for structure members.
    nested = ida_struct.get_sptr(member)
    return nested.id if nested else -1
def is_union(sid):
    """
    Is a structure a union?

    @param sid: structure type ID

    @return: 1: yes, this is a union id
             0: no

    @note: Unions are a special kind of structures
    """
    sptr = ida_struct.get_struc(sid)
    return sptr.is_union() if sptr else 0
def add_struc(index, name, is_union):
    """
    Define a new structure type

    @param index: index of new structure type
                  If another structure has the specified index,
                  then index of that structure and all other
                  structures will be incremented, freeing the specifed
                  index. If index is == -1, then the biggest index
                  number will be used.
                  See get_first_struc_idx() for the explanation of
                  structure indices and IDs.
    @param name: name of the new structure type.
    @param is_union: 0: structure
                     1: union

    @return: -1 if can't define structure type because of
             bad structure name: the name is ill-formed or is
             already used in the program.
             otherwise returns ID of the new structure type
    """
    # IDC's -1 sentinel ("append at the end") maps to BADADDR in the SDK.
    real_index = BADADDR if index == -1 else index
    return ida_struct.add_struc(real_index, name, is_union)
def del_struc(sid):
    """
    Delete a structure type

    @param sid: structure type ID

    @return: 0 if bad structure type ID is passed,
             1 otherwise: the structure type is deleted. All data
             and other structure types referencing the deleted
             structure type will be displayed as arrays of bytes.
    """
    sptr = ida_struct.get_struc(sid)
    return ida_struct.del_struc(sptr) if sptr else 0
def set_struc_idx(sid, index):
    """
    Change structure index

    @param sid: structure type ID
    @param index: new index of the structure

    @return: != 0 - ok

    @note: See get_first_struc_idx() for the explanation of
           structure indices and IDs.
    """
    sptr = ida_struct.get_struc(sid)
    return ida_struct.set_struc_idx(sptr, index) if sptr else 0
# Direct 1:1 aliases onto ida_struct; see that module for details.
set_struc_name = ida_struct.set_struc_name
set_struc_cmt = ida_struct.set_struc_cmt
def add_struc_member(sid, name, offset, flag, typeid, nbytes, target=-1, tdelta=0, reftype=REF_OFF32):
    """
    Add structure member

    @param sid: structure type ID
    @param name: name of the new member
    @param offset: offset of the new member
                   -1 means to add at the end of the structure
    @param flag: type of the new member. Should be one of
                 FF_BYTE..FF_PACKREAL (see above) combined with FF_DATA
    @param typeid: if is_struct(flag) then typeid specifies the structure id for the member
                   if is_off0(flag) then typeid specifies the offset base.
                   if is_strlit(flag) then typeid specifies the string type (STRTYPE_...).
                   if is_stroff(flag) then typeid specifies the structure id
                   if is_enum(flag) then typeid specifies the enum id
                   if is_custom(flag) then typeid specifies the dtid and fid: dtid|(fid<<16)
                   Otherwise typeid should be -1.
    @param nbytes: number of bytes in the new member
    @param target: target address of the offset expr. You may specify it as
                   -1, ida will calculate it itself
    @param tdelta: offset target delta. usually 0
    @param reftype: see REF_... definitions

    @note: The remaining arguments are allowed only if is_off0(flag) and you want
           to specify a complex offset expression

    @return: 0 - ok, otherwise error code (one of STRUC_ERROR_*)
    """
    # The work is delegated to the built-in IDC function of the same name
    # via eval_idc(); str2user() escapes the member name so it survives
    # embedding in the generated IDC source text.
    if is_off0(flag):
        # Off0 members carry the extra offset-expression arguments.
        return eval_idc('add_struc_member(%d, "%s", %d, %d, %d, %d, %d, %d, %d);' % (sid, ida_kernwin.str2user(name or ""), offset, flag, typeid, nbytes,
                                                                                     target, tdelta, reftype))
    else:
        return eval_idc('add_struc_member(%d, "%s", %d, %d, %d, %d);' % (sid, ida_kernwin.str2user(name or ""), offset, flag, typeid, nbytes))
# Error codes returned by add_struc_member():
STRUC_ERROR_MEMBER_NAME    = -1 # already has member with this name (bad name)
STRUC_ERROR_MEMBER_OFFSET  = -2 # already has member at this offset
STRUC_ERROR_MEMBER_SIZE    = -3 # bad number of bytes or bad sizeof(type)
STRUC_ERROR_MEMBER_TINFO   = -4 # bad typeid parameter
STRUC_ERROR_MEMBER_STRUCT  = -5 # bad struct id (the 1st argument)
STRUC_ERROR_MEMBER_UNIVAR  = -6 # unions can't have variable sized members
STRUC_ERROR_MEMBER_VARLAST = -7 # variable sized member should be the last member in the structure
def del_struc_member(sid, member_offset):
    """
    Delete structure member

    @param sid: structure type ID
    @param member_offset: offset of the member

    @return: != 0 - ok.

    @note: IDA allows 'holes' between members of a structure,
           treated as unnamed arrays of bytes.
    """
    sptr = ida_struct.get_struc(sid)
    return ida_struct.del_struc_member(sptr, member_offset) if sptr else 0
def set_member_name(sid, member_offset, name):
    """
    Change structure member name

    @param sid: structure type ID
    @param member_offset: offset of the member
    @param name: new name of the member

    @return: != 0 - ok.
    """
    sptr = ida_struct.get_struc(sid)
    return ida_struct.set_member_name(sptr, member_offset, name) if sptr else 0
def set_member_type(sid, member_offset, flag, typeid, nitems, target=-1, tdelta=0, reftype=REF_OFF32):
    """
    Change structure member type

    @param sid: structure type ID
    @param member_offset: offset of the member
    @param flag: new type of the member. Should be one of
                 FF_BYTE..FF_PACKREAL (see above) combined with FF_DATA
    @param typeid: if is_struct(flag) then typeid specifies the structure id for the member
                   if is_off0(flag) then typeid specifies the offset base.
                   if is_strlit(flag) then typeid specifies the string type (STRTYPE_...).
                   if is_stroff(flag) then typeid specifies the structure id
                   if is_enum(flag) then typeid specifies the enum id
                   if is_custom(flag) then typeid specifies the dtid and fid: dtid|(fid<<16)
                   Otherwise typeid should be -1.
    @param nitems: number of items in the member
    @param target: target address of the offset expr. You may specify it as
                   -1, ida will calculate it itself
    @param tdelta: offset target delta. usually 0
    @param reftype: see REF_... definitions

    @note: The remaining arguments are allowed only if is_off0(flag) and you want
           to specify a complex offset expression

    @return: !=0 - ok.
    """
    # Delegates to the built-in IDC function of the same name via eval_idc();
    # the extra offset-expression arguments are only forwarded for off0 members.
    if is_off0(flag):
        return eval_idc('set_member_type(%d, %d, %d, %d, %d, %d, %d, %d);' % (sid, member_offset, flag, typeid, nitems,
                                                                              target, tdelta, reftype))
    else:
        return eval_idc('set_member_type(%d, %d, %d, %d, %d);' % (sid, member_offset, flag, typeid, nitems))
def set_member_cmt(sid, member_offset, comment, repeatable):
    """
    Change structure member comment

    @param sid: structure type ID
    @param member_offset: offset of the member
    @param comment: new comment of the structure member
    @param repeatable: 1: change repeatable comment
                       0: change regular comment

    @return: != 0 - ok
    """
    sptr = ida_struct.get_struc(sid)
    member = ida_struct.get_member(sptr, member_offset) if sptr else None
    if not member:
        return 0
    return ida_struct.set_member_cmt(member, comment, repeatable)
def expand_struc(sid, offset, delta, recalc):
    """
    Expand or shrink a structure type

    @param sid: structure type ID
    @param offset: offset in the structure
    @param delta: how many bytes to add or remove
    @param recalc: recalculate the locations where the structure
                   type is used

    @return: != 0 - ok
    """
    sptr = ida_struct.get_struc(sid)
    return ida_struct.expand_struc(sptr, offset, delta, recalc) if sptr else 0
def get_fchunk_attr(ea, attr):
    """
    Get a function chunk attribute

    @param ea: any address in the chunk
    @param attr: one of: FUNCATTR_START, FUNCATTR_END, FUNCATTR_OWNER, FUNCATTR_REFQTY

    @return: desired attribute, or BADADDR if there is no chunk at 'ea'
    """
    chunk = ida_funcs.get_fchunk(ea)
    if not chunk:
        return BADADDR
    return _IDC_GetAttr(chunk, _FUNCATTRMAP, attr)
def set_fchunk_attr(ea, attr, value):
    """
    Set a function chunk attribute

    @param ea: any address in the chunk
    @param attr: only FUNCATTR_START, FUNCATTR_END, FUNCATTR_OWNER
    @param value: desired value

    @return: 0 if failed, 1 if success
    """
    # Only a subset of the function attributes may be changed on a chunk.
    if attr not in (FUNCATTR_START, FUNCATTR_END, FUNCATTR_OWNER):
        return 0
    chunk = ida_funcs.get_fchunk(ea)
    if not chunk:
        return 0
    _IDC_SetAttr(chunk, _FUNCATTRMAP, attr, value)
    return ida_funcs.update_func(chunk)
# Direct 1:1 alias onto ida_funcs.
get_fchunk_referer = ida_funcs.get_fchunk_referer
def get_next_fchunk(ea):
    """
    Get next function chunk

    @param ea: any address

    @return: the starting address of the next function chunk or BADADDR

    @note: This function enumerates all chunks of all functions in the database
    """
    chunk = ida_funcs.get_next_fchunk(ea)
    return chunk.start_ea if chunk else BADADDR
def get_prev_fchunk(ea):
    """
    Get previous function chunk

    @param ea: any address

    @return: the starting address of the function chunk or BADADDR

    @note: This function enumerates all chunks of all functions in the database
    """
    chunk = ida_funcs.get_prev_fchunk(ea)
    return chunk.start_ea if chunk else BADADDR
def append_func_tail(funcea, ea1, ea2):
    """
    Append a function chunk to the function

    @param funcea: any address in the function
    @param ea1: start of function tail
    @param ea2: end of function tail

    @return: 0 if failed, 1 if success

    @note: If a chunk exists at the specified addresses, it must have exactly
           the specified boundaries
    """
    func = ida_funcs.get_func(funcea)
    return ida_funcs.append_func_tail(func, ea1, ea2) if func else 0
def remove_fchunk(funcea, tailea):
    """
    Remove a function chunk from the function

    @param funcea: any address in the function
    @param tailea: any address in the function chunk to remove

    @return: 0 if failed, 1 if success
    """
    func = ida_funcs.get_func(funcea)
    return ida_funcs.remove_func_tail(func, tailea) if func else 0
def set_tail_owner(tailea, funcea):
    """
    Change the function chunk owner

    @param tailea: any address in the function chunk
    @param funcea: the starting address of the new owner

    @return: False if failed, True if success

    @note: The new owner must already have the chunk appended before the call
    """
    chunk = ida_funcs.get_fchunk(tailea)
    return ida_funcs.set_tail_owner(chunk, funcea) if chunk else False
def first_func_chunk(funcea):
    """
    Get the first function chunk of the specified function

    @param funcea: any address in the function

    @return: the function entry point or BADADDR

    @note: This function returns the first (main) chunk of the specified function
    """
    func = ida_funcs.get_func(funcea)
    iterator = ida_funcs.func_tail_iterator_t(func, funcea)
    return iterator.chunk().start_ea if iterator.main() else BADADDR
def next_func_chunk(funcea, tailea):
    """
    Get the next function chunk of the specified function

    @param funcea: any address in the function
    @param tailea: any address in the current chunk

    @return: the starting address of the next function chunk or BADADDR

    @note: This function returns the next chunk of the specified function
    """
    func = ida_funcs.get_func(funcea)
    fci = ida_funcs.func_tail_iterator_t(func, funcea)
    # main() positions the iterator at the entry chunk; failure means
    # 'funcea' does not belong to a function.
    if not fci.main():
        return BADADDR

    # Iterate and try to find the current chunk (the one containing tailea)
    found = False
    while True:
        # half-open containment test: start_ea <= tailea < end_ea
        if fci.chunk().start_ea <= tailea and \
           fci.chunk().end_ea > tailea:
            found = True
            break

        # next(fci) advances the SWIG iterator; falsy result = no more chunks
        if not next(fci):
            break

    # Return the next chunk, if there is one
    if found and next(fci):
        return fci.chunk().start_ea
    else:
        return BADADDR
# ----------------------------------------------------------------------------
# E N U M S
# ----------------------------------------------------------------------------
# Enum query IDC functions are direct aliases of their ida_enum
# counterparts; see that module for parameter documentation.
get_enum_qty = ida_enum.get_enum_qty
getn_enum = ida_enum.getn_enum
get_enum_idx = ida_enum.get_enum_idx
get_enum = ida_enum.get_enum
get_enum_name = ida_enum.get_enum_name
get_enum_cmt = ida_enum.get_enum_cmt
get_enum_size = ida_enum.get_enum_size
get_enum_width = ida_enum.get_enum_width
get_enum_flag = ida_enum.get_enum_flag
get_enum_member_by_name = ida_enum.get_enum_member_by_name
get_enum_member_value = ida_enum.get_enum_member_value
get_enum_member_bmask = ida_enum.get_enum_member_bmask
get_enum_member_enum = ida_enum.get_enum_member_enum
def get_enum_member(enum_id, value, serial, bmask):
    """
    Get id of constant

    @param enum_id: id of enum
    @param value: value of constant
    @param serial: serial number of the constant in the
                   enumeration. See op_enum() for details.
    @param bmask: bitmask of the constant
                  ordinary enums accept only ida_enum.DEFMASK as a bitmask

    @return: id of constant or -1 if error
    """
    # Negative Python ints are clamped into the unsigned ea_t range.
    mask = bmask & BADADDR if bmask < 0 else bmask
    return ida_enum.get_enum_member(enum_id, value, serial, mask)
# Bitmask enumeration helpers: direct 1:1 aliases onto ida_enum.
get_first_bmask = ida_enum.get_first_bmask
get_last_bmask = ida_enum.get_last_bmask
get_next_bmask = ida_enum.get_next_bmask
get_prev_bmask = ida_enum.get_prev_bmask
def get_bmask_name(enum_id, bmask):
    """
    Get bitmask name (only for bitfields)

    @param enum_id: id of enum
    @param bmask: bitmask of the constant

    @return: name of bitmask or None
    """
    mask = bmask & BADADDR if bmask < 0 else bmask
    return ida_enum.get_bmask_name(enum_id, mask)
def get_bmask_cmt(enum_id, bmask, repeatable):
    """
    Get bitmask comment (only for bitfields)

    @param enum_id: id of enum
    @param bmask: bitmask of the constant
    @param repeatable: type of comment, 0-regular, 1-repeatable

    @return: comment attached to bitmask or None
    """
    mask = bmask & BADADDR if bmask < 0 else bmask
    return ida_enum.get_bmask_cmt(enum_id, mask, repeatable)
def set_bmask_name(enum_id, bmask, name):
    """
    Set bitmask name (only for bitfields)

    @param enum_id: id of enum
    @param bmask: bitmask of the constant
    @param name: name of bitmask

    @return: 1-ok, 0-failed
    """
    mask = bmask & BADADDR if bmask < 0 else bmask
    return ida_enum.set_bmask_name(enum_id, mask, name)
def set_bmask_cmt(enum_id, bmask, cmt, repeatable):
    """
    Set bitmask comment (only for bitfields)

    @param enum_id: id of enum
    @param bmask: bitmask of the constant
    @param cmt: comment
    @param repeatable: type of comment, 0-regular, 1-repeatable

    @return: 1-ok, 0-failed
    """
    mask = bmask & BADADDR if bmask < 0 else bmask
    return ida_enum.set_bmask_cmt(enum_id, mask, cmt, repeatable)
def get_first_enum_member(enum_id, bmask):
    """
    Get first constant in the enum

    @param enum_id: id of enum
    @param bmask: bitmask of the constant (ordinary enums accept only ida_enum.DEFMASK as a bitmask)

    @return: value of constant or idaapi.BADNODE if no constants are defined
             All constants are sorted by their values as unsigned longs.
    """
    mask = bmask & BADADDR if bmask < 0 else bmask
    return ida_enum.get_first_enum_member(enum_id, mask)
def get_last_enum_member(enum_id, bmask):
    """
    Get last constant in the enum

    @param enum_id: id of enum
    @param bmask: bitmask of the constant (ordinary enums accept only ida_enum.DEFMASK as a bitmask)

    @return: value of constant or idaapi.BADNODE if no constants are defined
             All constants are sorted by their values as unsigned longs.
    """
    mask = bmask & BADADDR if bmask < 0 else bmask
    return ida_enum.get_last_enum_member(enum_id, mask)
def get_next_enum_member(enum_id, value, bmask):
    """
    Get next constant in the enum

    @param enum_id: id of enum
    @param value: value of the current constant
    @param bmask: bitmask of the constant; ordinary enums accept only
                  ida_enum.DEFMASK as a bitmask

    @return: value of a constant with value higher than the specified
             value, or idaapi.BADNODE if no such constants exist.
             All constants are sorted by their values as unsigned longs.
    """
    mask = bmask & BADADDR if bmask < 0 else bmask
    return ida_enum.get_next_enum_member(enum_id, value, mask)
def get_prev_enum_member(enum_id, value, bmask):
    """
    Get prev constant in the enum

    @param enum_id: id of enum
    @param value: value of the current constant
    @param bmask: bitmask of the constant; ordinary enums accept only
                  ida_enum.DEFMASK as a bitmask

    @return: value of a constant with value lower than the specified
             value, or idaapi.BADNODE if no such constants exist.
             All constants are sorted by their values as unsigned longs.
    """
    mask = bmask & BADADDR if bmask < 0 else bmask
    return ida_enum.get_prev_enum_member(enum_id, value, mask)
def get_enum_member_name(const_id):
    """
    Get name of a constant

    @param const_id: id of const

    @return: name of constant, or "" when the constant has no name
    """
    # Normalize a falsy result (None/empty) to the empty string.
    return ida_enum.get_enum_member_name(const_id) or ""
def get_enum_member_cmt(const_id, repeatable):
    """
    Get comment of a constant

    @param const_id: id of const
    @param repeatable: 0:get regular comment, 1:get repeatable comment

    @return: comment string ("" when there is no comment)
    """
    # Normalize a falsy result (None/empty) to the empty string.
    return ida_enum.get_enum_member_cmt(const_id, repeatable) or ""
def add_enum(idx, name, flag):
    """
    Add a new enum type

    @param idx: serial number of the new enum.
                If another enum with the same serial number
                exists, then all enums with serial
                numbers >= the specified idx get their
                serial numbers incremented (in other words,
                the new enum is put in the middle of the list of enums).

                If idx >= get_enum_qty() or idx == idaapi.BADNODE
                then the new enum is created at the end of
                the list of enums.
    @param name: name of the enum.
    @param flag: flags for representation of numeric constants
                 in the definition of enum.

    @return: id of new enum or BADADDR
    """
    # Negative indices are clamped into the unsigned size_t range.
    real_idx = idx & SIZE_MAX if idx < 0 else idx
    return ida_enum.add_enum(real_idx, name, flag)
# Enum modification helpers: direct 1:1 aliases onto ida_enum.
del_enum = ida_enum.del_enum
set_enum_idx = ida_enum.set_enum_idx
set_enum_name = ida_enum.set_enum_name
set_enum_cmt = ida_enum.set_enum_cmt
set_enum_flag = ida_enum.set_enum_flag
set_enum_bf = ida_enum.set_enum_bf
set_enum_width = ida_enum.set_enum_width
is_bf = ida_enum.is_bf
def add_enum_member(enum_id, name, value, bmask):
    """
    Add a member of enum - a symbolic constant

    @param enum_id: id of enum
    @param name: name of symbolic constant. Must be unique in the program.
    @param value: value of symbolic constant.
    @param bmask: bitmask of the constant
                  ordinary enums accept only ida_enum.DEFMASK as a bitmask
                  all bits set in value should be set in bmask too

    @return: 0-ok, otherwise error code (one of ENUM_MEMBER_ERROR_*)
    """
    mask = bmask & BADADDR if bmask < 0 else bmask
    return ida_enum.add_enum_member(enum_id, name, value, mask)
# Error codes returned by add_enum_member():
ENUM_MEMBER_ERROR_NAME  = ida_enum.ENUM_MEMBER_ERROR_NAME  # already have member with this name (bad name)
ENUM_MEMBER_ERROR_VALUE = ida_enum.ENUM_MEMBER_ERROR_VALUE # already have member with this value
ENUM_MEMBER_ERROR_ENUM  = ida_enum.ENUM_MEMBER_ERROR_ENUM  # bad enum id
ENUM_MEMBER_ERROR_MASK  = ida_enum.ENUM_MEMBER_ERROR_MASK  # bad bmask
ENUM_MEMBER_ERROR_ILLV  = ida_enum.ENUM_MEMBER_ERROR_ILLV  # bad bmask and value combination (~bmask & value != 0)
def del_enum_member(enum_id, value, serial, bmask):
    """
    Delete a member of enum - a symbolic constant

    @param enum_id: id of enum
    @param value: value of symbolic constant.
    @param serial: serial number of the constant in the
                   enumeration. See op_enum() for details.
    @param bmask: bitmask of the constant; ordinary enums accept
                  only ida_enum.DEFMASK as a bitmask

    @return: 1-ok, 0-failed
    """
    mask = bmask & BADADDR if bmask < 0 else bmask
    return ida_enum.del_enum_member(enum_id, value, serial, mask)
# Direct 1:1 aliases onto ida_enum.
set_enum_member_name = ida_enum.set_enum_member_name
set_enum_member_cmt = ida_enum.set_enum_member_cmt
#----------------------------------------------------------------------------
# A R R A Y S I N I D C
#----------------------------------------------------------------------------
# Netnode name prefix used to tag netnodes created through create_array(),
# so that array helpers can tell "IDC array" netnodes apart from others.
_IDC_ARRAY_PREFIX = "$ idc_array "
def __l2m1(v):
    """
    Long to minus 1: map the netnode BADNODE sentinel to -1;
    any other value is returned unchanged.
    """
    return -1 if v == ida_netnode.BADNODE else v
# Netnode tags selecting which of the two value spaces an IDC-array
# element is stored in (see get_array_element() and friends).
AR_LONG = ida_netnode.atag
"""Array of longs"""

AR_STR = ida_netnode.stag
"""Array of strings"""
class __dummy_netnode(object):
    """
    Implements, in an "always failing" fashion, the
    netnode functions that are necessary for the
    array-related functions.

    The sole purpose of this singleton class is to
    serve as a placeholder for netnode-manipulating
    functions, that don't want to each have to perform
    checks on the existence of the netnode.
    (..in other words: it avoids a bunch of if/else's).

    Each stub mimics the failure return value of the corresponding
    real ida_netnode.netnode method: mutators return 0, enumeration
    helpers return -1, kill() returns nothing.

    See __GetArrayById() for more info.
    """
    def rename(self, *args): return 0
    def kill(self, *args): pass
    def index(self, *args): return -1
    # alt* operate on the integer-value space, sup* on the string space.
    def altset(self, *args): return 0
    def supset(self, *args): return 0
    def altval(self, *args): return 0
    def supval(self, *args): return 0
    def altdel(self, *args): return 0
    def supdel(self, *args): return 0
    def altfirst(self, *args): return -1
    def supfirst(self, *args): return -1
    def altlast(self, *args): return -1
    def suplast(self, *args): return -1
    def altnext(self, *args): return -1
    def supnext(self, *args): return -1
    def altprev(self, *args): return -1
    def supprev(self, *args): return -1
    # hash* operate on the key/value ("hash") space.
    def hashset(self, *args): return 0
    def hashval(self, *args): return 0
    def hashstr(self, *args): return 0
    def hashstr_buf(self, *args): return 0
    def hashset_idx(self, *args): return 0
    def hashset_buf(self, *args): return 0
    def hashval_long(self, *args): return 0
    def hashdel(self, *args): return 0
    def hashfirst(self, *args): return 0
    def hashnext(self, *args): return 0
    def hashprev(self, *args): return 0
    def hashlast(self, *args): return 0
# The shared "always failing" singleton handed out by __GetArrayById().
__dummy_netnode.instance = __dummy_netnode()
def __GetArrayById(array_id):
    """
    Get an array, by its ID.

    This (internal) wrapper around 'idaapi.netnode(array_id)'
    ensures a certain safety around the retrieval of arrays:
    it catches unexpected exceptions and refuses to hand out
    netnodes that were not created through create_array()
    (i.e., whose name lacks the _IDC_ARRAY_PREFIX marker),
    returning the always-failing __dummy_netnode singleton
    in either case.

    @param array_id: A positive, valid array ID.
    """
    try:
        node = ida_netnode.netnode(array_id)
        nodename = node.get_name()
        if nodename and nodename.startswith(_IDC_ARRAY_PREFIX):
            return node
        return __dummy_netnode.instance
    except (TypeError, NotImplementedError):
        return __dummy_netnode.instance
def create_array(name):
    """
    Create array.

    @param name: The array name.

    @return: -1 in case of failure, a valid array_id otherwise.
    """
    node = ida_netnode.netnode()
    # netnode.create() returns a bool; it fails (False) e.g. when a
    # netnode with that name already exists.  Idiomatic truthiness test
    # instead of the original "== False" comparison (PEP 8 E712).
    if not node.create(_IDC_ARRAY_PREFIX + name):
        return -1
    return node.index()
def get_array_id(name):
    """
    Get array array_id, by name.

    @param name: The array name.

    @return: -1 in case of failure (i.e., no array with that
             name exists), a valid array_id otherwise.
    """
    # Do not create the netnode if it does not exist (create=False).
    node = ida_netnode.netnode(_IDC_ARRAY_PREFIX + name, 0, False)
    return __l2m1(node.index())
def rename_array(array_id, newname):
    """
    Rename array, by its ID.

    @param array_id: The ID of the array to rename.
    @param newname: The new name of the array.

    @return: 1 in case of success, 0 otherwise
    """
    node = __GetArrayById(array_id)
    return node.rename(_IDC_ARRAY_PREFIX + newname) == 1
def delete_array(array_id):
    """
    Delete array, by its ID.

    @param array_id: The ID of the array to delete.
    """
    node = __GetArrayById(array_id)
    node.kill()
def set_array_long(array_id, idx, value):
    """
    Sets the long value of an array element.

    @param array_id: The array ID.
    @param idx: Index of an element.
    @param value: 32bit or 64bit value to store in the array

    @return: 1 in case of success, 0 otherwise
    """
    node = __GetArrayById(array_id)
    return node.altset(idx, value)
def set_array_string(array_id, idx, value):
    """
    Sets the string value of an array element.

    @param array_id: The array ID.
    @param idx: Index of an element.
    @param value: String value to store in the array

    @return: 1 in case of success, 0 otherwise
    """
    node = __GetArrayById(array_id)
    return node.supset(idx, value)
def get_array_element(tag, array_id, idx):
    """
    Get value of array element.

    @param tag: Tag of array, specifies one of two array types: AR_LONG, AR_STR
    @param array_id: The array ID.
    @param idx: Index of an element.

    @return: Value of the specified array element. Note that
             this function may return char or long result. Nonexistent
             array elements give zero as a result.
    """
    node = __GetArrayById(array_id)
    if tag == AR_LONG:
        return node.altval(idx, tag)
    if tag == AR_STR:
        res = node.supval(idx, tag)
        return res if res is not None else 0
    # Unknown tag.
    return 0
def del_array_element(tag, array_id, idx):
    """
    Delete an array element.

    @param tag: Tag of array, specifies one of two array types: AR_LONG, AR_STR
    @param array_id: The array ID.
    @param idx: Index of an element.

    @return: 1 in case of success, 0 otherwise.
    """
    node = __GetArrayById(array_id)
    if tag == AR_LONG:
        return node.altdel(idx, tag)
    if tag == AR_STR:
        return node.supdel(idx, tag)
    # Unknown tag.
    return 0
def get_first_index(tag, array_id):
    """
    Get index of the first existing array element.

    @param tag: Tag of array, specifies one of two array types: AR_LONG, AR_STR
    @param array_id: The array ID.

    @return: -1 if the array is empty, otherwise index of first array
             element of given type.
    """
    node = __GetArrayById(array_id)
    if tag == AR_LONG:
        return __l2m1(node.altfirst(tag))
    if tag == AR_STR:
        return __l2m1(node.supfirst(tag))
    # Unknown tag.
    return -1
def get_last_index(tag, array_id):
    """
    Get index of last existing array element.

    @param tag: Tag of array, specifies one of two array types: AR_LONG, AR_STR
    @param array_id: The array ID.

    @return: -1 if the array is empty, otherwise index of last array
             element of given type.
    """
    node = __GetArrayById(array_id)
    if tag == AR_LONG:
        return __l2m1(node.altlast(tag))
    if tag == AR_STR:
        return __l2m1(node.suplast(tag))
    # Unknown tag.
    return -1
def get_next_index(tag, array_id, idx):
    """
    Get index of the next existing array element.

    @param tag: Tag of array, specifies one of two array types: AR_LONG, AR_STR
    @param array_id: The array ID.
    @param idx: Index of the current element.

    @return: -1 if no more elements, otherwise returns index of the
             next array element of given type.
    """
    node = __GetArrayById(array_id)
    try:
        if tag == AR_LONG:
            return __l2m1(node.altnext(idx, tag))
        if tag == AR_STR:
            return __l2m1(node.supnext(idx, tag))
    except OverflowError:
        # typically: An index of -1 was passed.
        pass
    return -1
def get_prev_index(tag, array_id, idx):
    """
    Get index of the previous existing array element.

    @param tag: Tag of array, specifies one of two array types: AR_LONG, AR_STR
    @param array_id: The array ID.
    @param idx: Index of the current element.

    @return: -1 if no more elements, otherwise returns index of the
             previous array element of given type.
    """
    node = __GetArrayById(array_id)
    try:
        if tag == AR_LONG:
            return __l2m1(node.altprev(idx, tag))
        if tag == AR_STR:
            return __l2m1(node.supprev(idx, tag))
    except OverflowError:
        # typically: An index of -1 was passed.
        pass
    return -1
# -------------------- hashes -----------------------
def set_hash_long(hash_id, key, value):
    """
    Sets the long value of a hash element.
    @param hash_id: The hash ID.
    @param key: Key of an element.
    @param value: 32bit or 64bit value to store in the hash
    @return: 1 in case of success, 0 otherwise
    """
    return __GetArrayById(hash_id).hashset_idx(key, value)
def get_hash_long(hash_id, key):
    """
    Gets the long value of a hash element.
    @param hash_id: The hash ID.
    @param key: Key of an element.
    @return: the 32bit or 64bit value of the element, or 0 if there is
             no such element.
    """
    return __GetArrayById(hash_id).hashval_long(key)
def set_hash_string(hash_id, key, value):
    """
    Sets the string value of a hash element.
    @param hash_id: The hash ID.
    @param key: Key of an element.
    @param value: string value to store in the hash
    @return: 1 in case of success, 0 otherwise
    """
    return __GetArrayById(hash_id).hashset_buf(key, value)
def get_hash_string(hash_id, key):
    """
    Gets the string value of a hash element.
    @param hash_id: The hash ID.
    @param key: Key of an element.
    @return: the string value of the element, or None if there is
             no such element.
    """
    return __GetArrayById(hash_id).hashstr_buf(key)
def del_hash_string(hash_id, key):
    """
    Delete a hash element.
    @param hash_id: The hash ID.
    @param key: Key of an element
    @return: 1 upon success, 0 otherwise.
    """
    return __GetArrayById(hash_id).hashdel(key)
def get_first_hash_key(hash_id):
    """
    Get the first key in the hash.
    @param hash_id: The hash ID.
    @return: the key, 0 otherwise.
    """
    key = __GetArrayById(hash_id).hashfirst()
    if key is None:
        return 0
    return key
def get_last_hash_key(hash_id):
    """
    Get the last key in the hash.
    @param hash_id: The hash ID.
    @return: the key, 0 otherwise.
    """
    key = __GetArrayById(hash_id).hashlast()
    if key is None:
        return 0
    return key
def get_next_hash_key(hash_id, key):
    """
    Get the next key in the hash.
    @param hash_id: The hash ID.
    @param key: The current key.
    @return: the next key, 0 otherwise
    """
    nxt = __GetArrayById(hash_id).hashnext(key)
    if nxt is None:
        return 0
    return nxt
def get_prev_hash_key(hash_id, key):
    """
    Get the previous key in the hash.
    @param hash_id: The hash ID.
    @param key: The current key.
    @return: the previous key, 0 otherwise
    """
    prv = __GetArrayById(hash_id).hashprev(key)
    if prv is None:
        return 0
    return prv
#----------------------------------------------------------------------------
# S O U R C E F I L E / L I N E N U M B E R S
#----------------------------------------------------------------------------
add_sourcefile = ida_lines.add_sourcefile
get_sourcefile = ida_lines.get_sourcefile
del_sourcefile = ida_lines.del_sourcefile
set_source_linnum = ida_nalt.set_source_linnum
get_source_linnum = ida_nalt.get_source_linnum
del_source_linnum = ida_nalt.del_source_linnum
#----------------------------------------------------------------------------
# T Y P E L I B R A R I E S
#----------------------------------------------------------------------------
def add_default_til(name):
    """
    Load a type library.
    @param name: name of the type library.
    @return: 1-ok, 0-failed.
    """
    return 1 if ida_typeinf.add_til(name, ida_typeinf.ADDTIL_DEFAULT) else 0
def import_type(idx, type_name):
    """
    Copy information from type library to database
    Copy structure, union, or enum definition from the type library
    to the IDA database.
    @param idx: the position of the new type in the list of
                types (structures or enums); -1 means at the end of the list
    @param type_name: name of type to copy
    @return: BADNODE-failed, otherwise the type id (structure id or enum id)
    """
    # delegates to ida_typeinf; None selects the local (default) til
    return ida_typeinf.import_type(None, idx, type_name)
def get_type(ea):
    """
    Get type of function/variable as a C declaration string
    @param ea: the address of the object
    @return: type string or None if failed
    """
    return ida_typeinf.idc_get_type(ea)
def SizeOf(typestr):
    """
    Returns the size of the type. It is equivalent to IDC's sizeof().
    Use name, tp, fld = idc.parse_decl() ; SizeOf(tp) to retrieve the size
    @param typestr: serialized type string (the 'type' component of a
                    parse_decl()/get_tinfo() result)
    @return: -1 if typestring is not valid otherwise the size of the type
    """
    return ida_typeinf.calc_type_size(None, typestr)
def get_tinfo(ea):
    """
    Get type information of function/variable as 'typeinfo' object
    @param ea: the address of the object
    @return: None on failure, or (type, fields) tuple.
    """
    return ida_typeinf.idc_get_type_raw(ea)
def get_local_tinfo(ordinal):
    """
    Get local type information as 'typeinfo' object
    @param ordinal: slot number (1...NumberOfLocalTypes)
    @return: None on failure, or (type, fields) tuple.
    """
    return ida_typeinf.idc_get_local_type_raw(ordinal)
def guess_type(ea):
    """
    Guess type of function/variable
    @param ea: the address of the object, can be the structure member id too
    @return: type string or None if failed
    """
    return ida_typeinf.idc_guess_type(ea)
TINFO_GUESSED = 0x0000 # this is a guessed type
TINFO_DEFINITE = 0x0001 # this is a definite type
TINFO_DELAYFUNC = 0x0002 # if type is a function and no function exists at ea,
# schedule its creation and argument renaming to
# auto-analysis otherwise try to create it immediately
def apply_type(ea, py_type, flags=TINFO_DEFINITE):
    """
    Apply the specified type to the address
    @param ea: the address of the object
    @param py_type: typeinfo tuple (type, fields) as get_tinfo() returns
                    or tuple (name, type, fields) as parse_decl() returns
                    or None
                    if specified as None, then the
                    item associated with 'ea' will be deleted.
    @param flags: combination of TINFO_... constants or 0
    @return: Boolean
    """
    if py_type is None:
        py_type = ""
    is_empty_string = isinstance(py_type, ida_idaapi.string_types) and len(py_type) == 0
    if is_empty_string:
        # empty type: delete the type information at 'ea'
        pt = (b"", b"")
    elif len(py_type) == 3:
        # (name, type, fields): drop the name component
        pt = py_type[1:]
    else:
        pt = py_type
    return ida_typeinf.apply_type(None, pt[0], pt[1], ea, flags)
PT_SIL = ida_typeinf.PT_SIL # silent, no messages
PT_NDC = ida_typeinf.PT_NDC # don't decorate names
PT_TYP = ida_typeinf.PT_TYP # return declared type information
PT_VAR = ida_typeinf.PT_VAR # return declared object information
PT_PACKMASK = ida_typeinf.PT_PACKMASK # mask for pack alignment values
PT_HIGH = ida_typeinf.PT_HIGH # assume high level prototypes (with hidden args, etc)
PT_LOWER = ida_typeinf.PT_LOWER # lower the function prototypes
PT_REPLACE = ida_typeinf.PT_REPLACE # replace the old type (used in idc)
PT_RAWARGS = ida_typeinf.PT_RAWARGS # leave argument names unchanged (do not remove underscores)
PT_SILENT = PT_SIL # alias
PT_PAKDEF = 0x0000 # default pack value
PT_PAK1 = 0x0010 # #pragma pack(1)
PT_PAK2 = 0x0020 # #pragma pack(2)
PT_PAK4 = 0x0030 # #pragma pack(4)
PT_PAK8 = 0x0040 # #pragma pack(8)
PT_PAK16 = 0x0050 # #pragma pack(16)
# idc.py-specific
PT_FILE = 0x00010000 # input if a file name (otherwise contains type declarations)
def SetType(ea, newtype):
    """
    Set type of function/variable
    @param ea: the address of the object
    @param newtype: the type string in C declaration form.
                A missing trailing ';' is appended automatically
                by parse_decl().
                If specified as an empty string, then the
                item associated with 'ea' will be deleted.
    @return: Boolean (result of apply_type()), or None if parsing
             of 'newtype' failed.
    """
    if newtype != '':
        pt = parse_decl(newtype, PT_SIL)
        if pt is None:
            # parsing failed
            return None
    else:
        pt = None
    return apply_type(ea, pt, TINFO_DEFINITE)
def parse_decl(inputtype, flags):
    """
    Parse a single type declaration
    @param inputtype: file name or C declarations (depending on the flags)
    @param flags: combination of PT_... constants or 0
    @return: None on failure or (name, type, fields) tuple
    """
    decl = inputtype
    # the parser requires a terminating ';' on non-empty declarations
    if len(decl) != 0 and not decl.endswith(';'):
        decl = decl + ';'
    return ida_typeinf.idc_parse_decl(None, decl, flags)
def parse_decls(inputtype, flags=0):
    """
    Parse type declarations
    @param inputtype: file name or C declarations (depending on the flags)
    @param flags: combination of PT_... constants or 0
    @return: number of parsing errors (0 means no errors)
    """
    return ida_typeinf.idc_parse_types(inputtype, flags)
def print_decls(ordinals, flags):
    """
    Print types in a format suitable for use in a header file
    @param ordinals: comma-separated list of type ordinals
    @param flags: combination of PDF_... constants or 0
    @return: string containing the type definitions
    """
    # accumulate everything the kernel prints into one string
    class _collecting_sink(ida_typeinf.text_sink_t):
        def __init__(self):
            ida_typeinf.text_sink_t.__init__(self)
            self.text = ""
        def _print(self, defstr):
            self.text += defstr
            return 0
    sink = _collecting_sink()
    ordinal_list = [int(tok) for tok in ordinals.split(",")]
    ida_typeinf.print_decls(sink, None, ordinal_list, flags)
    return sink.text
PDF_INCL_DEPS = 0x1 # include dependencies
PDF_DEF_FWD = 0x2 # allow forward declarations
PDF_DEF_BASE = 0x4 # include base types: __int8, __int16, etc..
PDF_HEADER_CMT = 0x8 # prepend output with a descriptive comment
def get_ordinal_qty():
    """
    Get number of local types + 1
    @return: value >= 1. 1 means that there are no local types.
    """
    # None selects the local (default) til
    return ida_typeinf.get_ordinal_qty(None)
def set_local_type(ordinal, input, flags):
    """
    Parse one type declaration and store it in the specified slot
    @param ordinal: slot number (1...NumberOfLocalTypes)
                    -1 means allocate new slot or reuse the slot
                    of the existing named type
    @param input: C declaration. Empty input empties the slot
                  (note: the parameter name shadows the builtin 'input';
                  kept for IDC compatibility)
    @param flags: combination of PT_... constants or 0
    @return: slot number or 0 if error
    """
    return ida_typeinf.idc_set_local_type(ordinal, input, flags)
def GetLocalType(ordinal, flags):
    """
    Retrieve a local type declaration
    @param ordinal: slot number (1...NumberOfLocalTypes)
    @param flags: any of PRTYPE_* constants
    @return: local type as a C declaration or ""
    """
    # get_local_tinfo() returns None on failure; the previous code
    # unpacked the result unconditionally and raised TypeError for an
    # invalid ordinal instead of returning "".
    ret = get_local_tinfo(ordinal)
    if ret is None:
        return ""
    tinfo, fields = ret  # avoid shadowing the builtin 'type'
    if not tinfo:
        return ""
    name = get_numbered_type_name(ordinal)
    return ida_typeinf.idc_print_type(tinfo, fields, name, flags)
PRTYPE_1LINE = 0x0000 # print to one line
PRTYPE_MULTI = 0x0001 # print to many lines
PRTYPE_TYPE = 0x0002 # print type declaration (not variable declaration)
PRTYPE_PRAGMA = 0x0004 # print pragmas for alignment
def get_numbered_type_name(ordinal):
    """
    Retrieve a local type name
    @param ordinal: slot number (1...NumberOfLocalTypes)
    @return: local type name or None
    """
    return ida_typeinf.idc_get_local_type_name(ordinal)
# ----------------------------------------------------------------------------
# H I D D E N A R E A S
# ----------------------------------------------------------------------------
add_hidden_range = ida_bytes.add_hidden_range
def update_hidden_range(ea, visible):
    """
    Set hidden range state
    @param ea: any address belonging to the hidden range
    @param visible: new state of the range
    @return: != 0 - ok
    """
    rng = ida_bytes.get_hidden_range(ea)
    if not rng:
        # no hidden range covers 'ea'
        return 0
    rng.visible = visible
    return ida_bytes.update_hidden_range(rng)
del_hidden_range = ida_bytes.del_hidden_range
#--------------------------------------------------------------------------
# D E B U G G E R I N T E R F A C E
#--------------------------------------------------------------------------
load_debugger = ida_dbg.load_debugger
start_process = ida_dbg.start_process
exit_process = ida_dbg.exit_process
suspend_process = ida_dbg.suspend_process
get_processes = ida_dbg.get_processes
attach_process = ida_dbg.attach_process
detach_process = ida_dbg.detach_process
get_thread_qty = ida_dbg.get_thread_qty
getn_thread = ida_dbg.getn_thread
get_current_thread = ida_dbg.get_current_thread
getn_thread_name = ida_dbg.getn_thread_name
select_thread = ida_dbg.select_thread
suspend_thread = ida_dbg.suspend_thread
resume_thread = ida_dbg.resume_thread
def _get_modules():
    """
    INTERNAL: Enumerate process modules.
    Note: the same modinfo_t object is yielded each time, refreshed
    in place by get_next_module().
    """
    mod = ida_idd.modinfo_t()
    ok = ida_dbg.get_first_module(mod)
    while ok:
        yield mod
        ok = ida_dbg.get_next_module(mod)
def get_first_module():
    """
    Enumerate process modules
    @return: first module's base address or None on failure
    """
    return next((mod.base for mod in _get_modules()), None)
def get_next_module(base):
    """
    Enumerate process modules
    @param base: previous module's base address
    @return: next module's base address or None on failure
    """
    mods = _get_modules()
    for mod in mods:
        if mod.base == base:
            # return the module following the matched one, if any
            for nxt in mods:
                return nxt.base
            return None
    return None
def get_module_name(base):
    """
    Get process module name
    @param base: the base address of the module
    @return: the module name, or 0 if no module with that base address
             exists (note: 0, not None, for IDC compatibility)
    """
    for module in _get_modules():
        if module.base == base:
            return module.name
    else:
        return 0
def get_module_size(base):
    """
    Get process module size
    @param base: the base address of the module
    @return: the module size, or -1 if no module with that base exists
    """
    for mod in _get_modules():
        if mod.base == base:
            return mod.size
    return -1
step_into = ida_dbg.step_into
step_over = ida_dbg.step_over
run_to = ida_dbg.run_to
step_until_ret = ida_dbg.step_until_ret
wait_for_next_event = ida_dbg.wait_for_next_event
def resume_process():
    """Resume the suspended process (continue, do not wait)."""
    return wait_for_next_event(WFNE_CONT | WFNE_NOWAIT, 0)
def send_dbg_command(cmd):
    """Sends a command to the debugger module and returns the output string.
    An exception will be raised if the debugger is not running or the current debugger does not export
    the 'send_dbg_command' IDC command.
    """
    res = eval_idc('send_dbg_command("%s");' % ida_kernwin.str2user(cmd))
    if res.startswith("IDC_FAILURE"):
        raise Exception("Debugger command is available only when the debugger is active!")
    return res
# wfne flag is combination of the following:
WFNE_ANY = 0x0001 # return the first event (even if it doesn't suspend the process)
# if the process is still running, the database
# does not reflect the memory state. you might want
# to call refresh_debugger_memory() in this case
WFNE_SUSP = 0x0002 # wait until the process gets suspended
WFNE_SILENT = 0x0004 # 1: be silent, 0: display modal boxes if necessary
WFNE_CONT = 0x0008 # continue from the suspended state
WFNE_NOWAIT = 0x0010 # do not wait for any event, immediately return DEC_TIMEOUT
# (to be used with WFNE_CONT)
# debugger event codes
NOTASK = -2 # process does not exist
DBG_ERROR = -1 # error (e.g. network problems)
DBG_TIMEOUT = 0 # timeout
PROCESS_STARTED = 0x00000001 # New process started
PROCESS_EXITED = 0x00000002 # Process stopped
THREAD_STARTED = 0x00000004 # New thread started
THREAD_EXITED = 0x00000008 # Thread stopped
BREAKPOINT = 0x00000010 # Breakpoint reached
STEP = 0x00000020 # One instruction executed
EXCEPTION = 0x00000040 # Exception
LIB_LOADED = 0x00000080 # New library loaded
LIB_UNLOADED = 0x00000100 # Library unloaded
INFORMATION = 0x00000200 # User-defined information
PROCESS_ATTACHED = 0x00000400 # Attached to running process
PROCESS_DETACHED = 0x00000800 # Detached from process
PROCESS_SUSPENDED = 0x00001000 # Process has been suspended
refresh_debugger_memory = ida_dbg.refresh_debugger_memory
take_memory_snapshot = ida_segment.take_memory_snapshot
get_process_state = ida_dbg.get_process_state
DSTATE_SUSP = -1 # process is suspended
DSTATE_NOTASK = 0 # no process is currently debugged
DSTATE_RUN = 1 # process is running
DSTATE_RUN_WAIT_ATTACH = 2 # process is running, waiting for process properly attached
DSTATE_RUN_WAIT_END = 3 # process is running, but the user asked to kill/detach the process
# remark: in this case, most events are ignored
"""
Get various information about the current debug event
These functions are valid only when the current event exists
(the process is in the suspended state)
"""
# For all events:
def __current_debug_event():
    """
    INTERNAL: return the current debug event.
    Asserts that an event is available; these accessors are valid only
    while the process is suspended on a debug event. Factored out of
    the fifteen get_event_*() accessors below, which all repeated the
    same fetch-and-assert sequence.
    """
    ev = ida_dbg.get_debug_event()
    assert ev, "Could not retrieve debug event"
    return ev
# For all events:
def get_event_id():
    """
    Get ID of debug event
    @return: event ID
    """
    return __current_debug_event().eid()
def get_event_pid():
    """
    Get process ID for debug event
    @return: process ID
    """
    return __current_debug_event().pid
def get_event_tid():
    """
    Get thread ID for debug event
    @return: thread ID
    """
    return __current_debug_event().tid
def get_event_ea():
    """
    Get ea for debug event
    @return: ea
    """
    return __current_debug_event().ea
def is_event_handled():
    """
    Is the debug event handled?
    @return: boolean
    """
    return __current_debug_event().handled
# For PROCESS_STARTED, PROCESS_ATTACHED, LIB_LOADED events:
def get_event_module_name():
    """
    Get module name for debug event
    @return: module name
    """
    return ida_idd.get_event_module_name(__current_debug_event())
def get_event_module_base():
    """
    Get module base for debug event
    @return: module base
    """
    return ida_idd.get_event_module_base(__current_debug_event())
def get_event_module_size():
    """
    Get module size for debug event
    @return: module size
    """
    return ida_idd.get_event_module_size(__current_debug_event())
def get_event_exit_code():
    """
    Get exit code for debug event
    @return: exit code for PROCESS_EXITED, THREAD_EXITED events
    """
    return __current_debug_event().exit_code()
def get_event_info():
    """
    Get debug event info
    @return: event info: for THREAD_STARTED (thread name)
             for LIB_UNLOADED (unloaded library name)
             for INFORMATION (message to display)
    """
    return ida_idd.get_event_info(__current_debug_event())
def get_event_bpt_hea():
    """
    Get hardware address for BREAKPOINT event
    @return: hardware address
    """
    return ida_idd.get_event_bpt_hea(__current_debug_event())
def get_event_exc_code():
    """
    Get exception code for EXCEPTION event
    @return: exception code
    """
    return ida_idd.get_event_exc_code(__current_debug_event())
def get_event_exc_ea():
    """
    Get address for EXCEPTION event
    @return: address of exception
    """
    return ida_idd.get_event_exc_ea(__current_debug_event())
def can_exc_continue():
    """
    Can it continue after EXCEPTION event?
    @return: boolean
    """
    return ida_idd.can_exc_continue(__current_debug_event())
def get_event_exc_info():
    """
    Get info for EXCEPTION event
    @return: info string
    """
    return ida_idd.get_event_exc_info(__current_debug_event())
set_debugger_options = ida_dbg.set_debugger_options
DOPT_SEGM_MSGS = 0x00000001 # print messages on debugger segments modifications
DOPT_START_BPT = 0x00000002 # break on process start
DOPT_THREAD_MSGS = 0x00000004 # print messages on thread start/exit
DOPT_THREAD_BPT = 0x00000008 # break on thread start/exit
DOPT_BPT_MSGS = 0x00000010 # print message on breakpoint
DOPT_LIB_MSGS = 0x00000040 # print message on library load/unload
DOPT_LIB_BPT = 0x00000080 # break on library load/unload
DOPT_INFO_MSGS = 0x00000100 # print message on debugging information
DOPT_INFO_BPT = 0x00000200 # break on debugging information
DOPT_REAL_MEMORY = 0x00000400 # don't hide breakpoint instructions
DOPT_REDO_STACK = 0x00000800 # reconstruct the stack
DOPT_ENTRY_BPT = 0x00001000 # break on program entry point
DOPT_EXCDLG = 0x00006000 # exception dialogs:
EXCDLG_NEVER = 0x00000000 # never display exception dialogs
EXCDLG_UNKNOWN = 0x00002000 # display for unknown exceptions
EXCDLG_ALWAYS = 0x00006000 # always display
DOPT_LOAD_DINFO = 0x00008000 # automatically load debug files (pdb)
get_debugger_event_cond = ida_dbg.get_debugger_event_cond
set_debugger_event_cond = ida_dbg.set_debugger_event_cond
set_remote_debugger = ida_dbg.set_remote_debugger
define_exception = ida_dbg.define_exception
EXC_BREAK = 0x0001 # break on the exception
EXC_HANDLE = 0x0002 # should be handled by the debugger?
get_reg_value = ida_dbg.get_reg_val
def set_reg_value(value, name):
    """
    Set register value
    @param value: new register value (note: comes FIRST, for IDC
                  compatibility; ida_dbg.set_reg_val takes name first)
    @param name: the register name
    @note: The debugger should be running
           It is not necessary to use this function to set register values.
           A register name in the left side of an assignment will do too.
    """
    return ida_dbg.set_reg_val(name, value)
get_bpt_qty = ida_dbg.get_bpt_qty
def get_bpt_ea(n):
    """
    Get breakpoint address
    @param n: breakpoint number, in range 0..get_bpt_qty()-1
    @return: address of the breakpoint or BADADDR
    """
    bpt = ida_dbg.bpt_t()
    ok = ida_dbg.getn_bpt(n, bpt)
    return bpt.ea if ok else BADADDR
def get_bpt_attr(ea, bptattr):
    """
    Get the characteristics of a breakpoint
    @param ea: any address in the breakpoint range
    @param bptattr: the desired attribute code, one of BPTATTR_... constants
    @return: the desired attribute value or -1
    """
    bpt = ida_dbg.bpt_t()
    if not ida_dbg.get_bpt(ea, bpt):
        return -1
    # map attribute codes onto bpt_t attribute names
    attr_names = {
        BPTATTR_EA:    "ea",
        BPTATTR_SIZE:  "size",
        BPTATTR_TYPE:  "type",
        BPTATTR_COUNT: "pass_count",
        BPTATTR_FLAGS: "flags",
        BPTATTR_COND:  "condition",
        BPTATTR_PID:   "pid",
        BPTATTR_TID:   "tid",
    }
    if bptattr in attr_names:
        return getattr(bpt, attr_names[bptattr])
    return -1
BPTATTR_EA = 1 # starting address of the breakpoint
BPTATTR_SIZE = 2 # size of the breakpoint (undefined for software breakpoint)
# type of the breakpoint
BPTATTR_TYPE = 3
# Breakpoint types:
BPT_WRITE = 1 # Hardware: Write access
BPT_RDWR = 3 # Hardware: Read/write access
BPT_SOFT = 4 # Software breakpoint
BPT_EXEC = 8 # Hardware: Execute instruction
BPT_DEFAULT = (BPT_SOFT|BPT_EXEC); # Choose bpt type automaticaly
BPTATTR_COUNT = 4
BPTATTR_FLAGS = 5
BPT_BRK = 0x001 # the debugger stops on this breakpoint
BPT_TRACE = 0x002 # the debugger adds trace information when this breakpoint is reached
BPT_UPDMEM = 0x004 # refresh the memory layout and contents before evaluating bpt condition
BPT_ENABLED = 0x008 # enabled?
BPT_LOWCND = 0x010 # condition is calculated at low level (on the server side)
BPT_TRACEON = 0x020 # enable tracing when the breakpoint is reached
BPT_TRACE_INSN = 0x040 # instruction tracing
BPT_TRACE_FUNC = 0x080 # function tracing
BPT_TRACE_BBLK = 0x100 # basic block tracing
BPTATTR_COND = 6 # Breakpoint condition. NOTE: the return value is a string in this case
BPTATTR_PID = 7 # Breakpoint process id
BPTATTR_TID = 8 # Breakpoint thread id
# Breakpoint location type:
BPLT_ABS = 0 # Absolute address. Attributes:
# - locinfo: absolute address
BPLT_REL = 1 # Module relative address. Attributes:
# - locpath: the module path
# - locinfo: offset from the module base address
BPLT_SYM = 2 # Symbolic name. The name will be resolved on DLL load/unload
# events and on naming an address. Attributes:
# - locpath: symbol name
# - locinfo: offset from the symbol base address
def set_bpt_attr(address, bptattr, value):
    """
    Modify the characteristics of a breakpoint
    @param address: any address in the breakpoint range
    @param bptattr: the attribute code, one of BPTATTR_* constants;
                    BPTATTR_COND is not allowed, see set_bpt_cond()
    @param value: the attribute value
    @return: success
    """
    bpt = ida_dbg.bpt_t()
    if not ida_dbg.get_bpt(address, bpt):
        return False
    # only these attributes may be written through this function
    writable = {
        BPTATTR_SIZE:  "size",
        BPTATTR_TYPE:  "type",
        BPTATTR_COUNT: "pass_count",
        BPTATTR_FLAGS: "flags",
        BPTATTR_PID:   "pid",
        BPTATTR_TID:   "tid",
    }
    if bptattr not in writable:
        return False
    setattr(bpt, writable[bptattr], value)
    return ida_dbg.update_bpt(bpt)
def set_bpt_cond(ea, cnd, is_lowcnd=0):
    """
    Set breakpoint condition
    @param ea: any address in the breakpoint range
    @param cnd: breakpoint condition
    @param is_lowcnd: 0 - regular condition, 1 - low level condition
    @return: success
    """
    bpt = ida_dbg.bpt_t()
    if not ida_dbg.get_bpt(ea, bpt):
        return False
    bpt.condition = cnd
    # BPT_LOWCND marks a condition evaluated on the server side
    bpt.flags = (bpt.flags | BPT_LOWCND) if is_lowcnd else (bpt.flags & ~BPT_LOWCND)
    return ida_dbg.update_bpt(bpt)
add_bpt = ida_dbg.add_bpt
del_bpt = ida_dbg.del_bpt
enable_bpt = ida_dbg.enable_bpt
check_bpt = ida_dbg.check_bpt
BPTCK_NONE = -1 # breakpoint does not exist
BPTCK_NO = 0 # breakpoint is disabled
BPTCK_YES = 1 # breakpoint is enabled
BPTCK_ACT = 2 # breakpoint is active (written to the process)
def enable_tracing(trace_level, enable):
    """
    Enable step tracing
    @param trace_level: what kind of trace to modify, one of the
                        TRACE_* constants (defined just below)
    @param enable: 0: turn off, 1: turn on
    @return: success
    @raise ValueError: if trace_level is not a TRACE_* constant
    """
    # Validate with a real exception instead of `assert`, which is
    # silently stripped when Python runs with -O.
    dispatch = {
        TRACE_STEP: ida_dbg.enable_step_trace,
        TRACE_INSN: ida_dbg.enable_insn_trace,
        TRACE_FUNC: ida_dbg.enable_func_trace,
    }
    if trace_level not in dispatch:
        raise ValueError("trace_level must be one of TRACE_* constants")
    return dispatch[trace_level](enable)
TRACE_STEP = 0x0 # lowest level trace. trace buffers are not maintained
TRACE_INSN = 0x1 # instruction level trace
TRACE_FUNC = 0x2 # function level trace (calls & rets)
get_step_trace_options = ida_dbg.get_step_trace_options
set_step_trace_options = ida_dbg.set_step_trace_options
ST_OVER_DEBUG_SEG = 0x01 # step tracing will be disabled when IP is in a debugger segment
ST_OVER_LIB_FUNC = 0x02 # step tracing will be disabled when IP is in a library function
ST_ALREADY_LOGGED = 0x04 # step tracing will be disabled when IP is already logged
ST_SKIP_LOOPS = 0x08 # step tracing will try to skip loops already recorded
load_trace_file = ida_dbg.load_trace_file
save_trace_file = ida_dbg.save_trace_file
is_valid_trace_file = ida_dbg.is_valid_trace_file
diff_trace_file = ida_dbg.diff_trace_file
def clear_trace(filename=None):
    """
    Clear the current trace buffer
    @param filename: ignored; kept (now with a default value) for
                     backward/IDC compatibility. The *current* trace
                     buffer is always the one cleared.
    """
    return ida_dbg.clear_trace()
get_trace_file_desc = ida_dbg.get_trace_file_desc
set_trace_file_desc = ida_dbg.set_trace_file_desc
get_tev_qty = ida_dbg.get_tev_qty
get_tev_ea = ida_dbg.get_tev_ea
TEV_NONE = 0 # no event
TEV_INSN = 1 # an instruction trace
TEV_CALL = 2 # a function call trace
TEV_RET = 3 # a function return trace
TEV_BPT = 4 # write, read/write, execution trace
TEV_MEM = 5 # memory layout changed
TEV_EVENT = 6 # debug event
get_tev_type = ida_dbg.get_tev_type
get_tev_tid = ida_dbg.get_tev_tid
get_tev_reg = ida_dbg.get_tev_reg_val
get_tev_mem_qty = ida_dbg.get_tev_reg_mem_qty
get_tev_mem = ida_dbg.get_tev_reg_mem
get_tev_mem_ea = ida_dbg.get_tev_reg_mem_ea
get_call_tev_callee = ida_dbg.get_call_tev_callee
get_ret_tev_return = ida_dbg.get_ret_tev_return
get_bpt_tev_ea = ida_dbg.get_bpt_tev_ea
#--------------------------------------------------------------------------
# C O L O R S
#--------------------------------------------------------------------------
def get_color(ea, what):
    """
    Get item color
    @param ea: address of the item
    @param what: type of the item (one of CIC_* constants)
    @return: color code in RGB (hex 0xBBGGRR); DEFCOLOR when no
             function/segment exists at 'ea'
    @raise ValueError: if 'what' is not a CIC_* constant
    """
    if what == CIC_ITEM:
        return ida_nalt.get_item_color(ea)
    if what == CIC_FUNC:
        func = ida_funcs.get_func(ea)
        return func.color if func else DEFCOLOR
    if what == CIC_SEGM:
        seg = ida_segment.getseg(ea)
        return seg.color if seg else DEFCOLOR
    raise ValueError("'what' must be one of CIC_ITEM, CIC_FUNC and CIC_SEGM")
# color item codes:
CIC_ITEM = 1 # one instruction or data
CIC_FUNC = 2 # function
CIC_SEGM = 3 # segment
DEFCOLOR = 0xFFFFFFFF # Default color
def set_color(ea, what, color):
    """
    Set item color
    @param ea: address of the item
    @param what: type of the item (one of CIC_* constants)
    @param color: new color code in RGB (hex 0xBBGGRR)
    @return: success (True or False)
    @raise ValueError: if 'what' is not a CIC_* constant
    """
    if what == CIC_ITEM:
        return ida_nalt.set_item_color(ea, color)
    if what == CIC_FUNC:
        func = ida_funcs.get_func(ea)
        if not func:
            return False
        func.color = color
        return bool(ida_funcs.update_func(func))
    if what == CIC_SEGM:
        seg = ida_segment.getseg(ea)
        if not seg:
            return False
        seg.color = color
        return bool(seg.update())
    raise ValueError("'what' must be one of CIC_ITEM, CIC_FUNC and CIC_SEGM")
#----------------------------------------------------------------------------
# A R M S P E C I F I C
#----------------------------------------------------------------------------
def force_bl_jump(ea):
    """
    Force a BL instruction to be a jump.
    Some ARM compilers in Thumb mode use BL (branch-and-link)
    instead of B (branch) for long jumps, since BL has more range.
    By default, IDA tries to determine if BL is a jump or a call.
    You can override IDA's decision using commands in Edit/Other menu
    (Force BL call/Force BL jump) or this function and force_bl_call().
    @param ea: address of the BL instruction
    @return: 1-ok, 0-failed
    """
    return eval_idc("force_bl_jump(0x%x)" % ea)
def force_bl_call(ea):
    """
    Force a BL instruction to be a call (see force_bl_jump()).
    @param ea: address of the BL instruction
    @return: 1-ok, 0-failed
    """
    return eval_idc("force_bl_call(0x%x)" % ea)
#--------------------------------------------------------------------------
def set_flag(off, bit, value):
    """Set or clear 'bit' inside the inf attribute stored at 'off'."""
    cur = get_inf_attr(off)
    cur = (cur | bit) if value else (cur & ~bit)
    set_inf_attr(off, cur)
# Convenience functions:
def here():
    """Convenience alias: the address currently under the cursor."""
    return get_screen_ea()
def is_mapped(ea):
    """Is 'ea' mapped in the program? (prev_addr(ea+1) lands on ea)"""
    return prev_addr(ea + 1) == ea
ARGV = []
"""The command line arguments passed to IDA via the -S switch."""
# END OF IDC COMPATIBILITY CODE
| 31.711593 | 153 | 0.635799 |
from __future__ import print_function
import ida_idaapi
import ida_auto
import ida_dbg
import ida_diskio
import ida_entry
import ida_enum
import ida_expr
import ida_fixup
import ida_frame
import ida_funcs
import ida_gdl
import ida_ida
import ida_idc
import ida_bytes
import ida_idd
import ida_idp
import ida_kernwin
import ida_lines
import ida_loader
import ida_moves
import ida_nalt
import ida_name
import ida_netnode
import ida_offset
import ida_pro
import ida_search
import ida_segment
import ida_segregs
import ida_struct
import ida_typeinf
import ida_ua
import ida_xref
import _ida_idaapi
import os
import re
import struct
import time
import types
import sys
__EA64__ = ida_idaapi.BADADDR == 0xFFFFFFFFFFFFFFFF
WORDMASK = 0xFFFFFFFFFFFFFFFF if __EA64__ else 0xFFFFFFFF
class DeprecatedIDCError(Exception):
    """Raised when a deprecated IDC-compatibility function is used."""
    pass
__warned_deprecated_proto_confusion = {}
def __warn_once_deprecated_proto_confusion(what, alternative):
    """Print, at most once per 'what', a deprecation note pointing at 'alternative'."""
    if what in __warned_deprecated_proto_confusion:
        return
    print("NOTE: idc.%s is deprecated due to signature confusion with %s. Please use %s instead" % (
        what,
        alternative,
        alternative))
    __warned_deprecated_proto_confusion[what] = True
def _IDC_GetAttr(obj, attrmap, attroffs):
    """
    Read from 'obj' the attribute that 'attrmap' maps 'attroffs' to.
    attrmap values are (is_readonly, attribute_name) pairs.
    Raises KeyError when the offset is unknown or the attribute is absent.
    """
    entry = attrmap.get(attroffs)
    if entry is not None and hasattr(obj, entry[1]):
        return getattr(obj, entry[1])
    raise KeyError("attribute with offset %d not found, check the offset and report the problem" % attroffs)
def _IDC_SetAttr(obj, attrmap, attroffs, value):
    """
    Write 'value' into the attribute that 'attrmap' maps 'attroffs' to.
    attrmap values are (is_readonly, attribute_name) pairs.
    Raises KeyError for read-only attributes, unknown offsets, and
    attributes absent from 'obj'.
    """
    if attroffs in attrmap:
        readonly, attrname = attrmap[attroffs]
        if readonly:
            raise KeyError("attribute with offset %d is read-only" % attroffs)
        if hasattr(obj, attrname):
            return setattr(obj, attrname, value)
    raise KeyError("attribute with offset %d not found, check the offset and report the problem" % attroffs)
BADADDR = ida_idaapi.BADADDR # Not allowed address value
BADSEL = ida_idaapi.BADSEL # Not allowed selector value/number
SIZE_MAX = _ida_idaapi.SIZE_MAX
ida_ida.__set_module_dynattrs(
__name__,
{
"MAXADDR" : (lambda: ida_ida.inf_get_privrange_start_ea(), None),
})
#
# Flag bit definitions (for get_full_flags())
#
MS_VAL = ida_bytes.MS_VAL # Mask for byte value
FF_IVL = ida_bytes.FF_IVL # Byte has value ?
# Do flags contain byte value? (i.e. has the byte a value?)
# if not, the byte is uninitialized.
def has_value(F):
    """Do the flags carry a byte value? (if not, the byte is uninitialized)"""
    return (F & FF_IVL) != 0
def byte_value(F):
    """Extract the byte value stored in the flags."""
    return F & MS_VAL
def is_loaded(ea):
    """Does the byte at 'ea' have a defined value?"""
    return has_value(get_full_flags(ea))
MS_CLS = ida_bytes.MS_CLS # Mask for typing
FF_CODE = ida_bytes.FF_CODE # Code ?
FF_DATA = ida_bytes.FF_DATA # Data ?
FF_TAIL = ida_bytes.FF_TAIL # Tail ?
FF_UNK = ida_bytes.FF_UNK # Unknown ?
def is_code(F):
    """Is the byte a code byte?"""
    return (F & MS_CLS) == FF_CODE
def is_data(F):
    """Is the byte a data byte?"""
    return (F & MS_CLS) == FF_DATA
def is_tail(F):
    """Is the byte a tail byte (continuation of an item)?"""
    return (F & MS_CLS) == FF_TAIL
def is_unknown(F):
    """Is the byte unexplored?"""
    return (F & MS_CLS) == FF_UNK
def is_head(F):
    """Is the byte the start of a code or data item?"""
    return (F & FF_DATA) != 0
#
# Common bits
#
MS_COMM = ida_bytes.MS_COMM # Mask of common bits
FF_COMM = ida_bytes.FF_COMM # Has comment?
FF_REF = ida_bytes.FF_REF # has references?
FF_LINE = ida_bytes.FF_LINE # Has next or prev cmt lines ?
FF_NAME = ida_bytes.FF_NAME # Has user-defined name ?
FF_LABL = ida_bytes.FF_LABL # Has dummy name?
FF_FLOW = ida_bytes.FF_FLOW # Exec flow from prev instruction?
FF_ANYNAME = FF_LABL | FF_NAME
def is_flow(F):
    """True if execution can flow into this item from the previous instruction."""
    return (F & FF_FLOW) != 0

def isExtra(F):
    """True if the item has anterior/posterior comment lines."""
    return (F & FF_LINE) != 0

def isRef(F):
    """True if the item is referenced from somewhere."""
    return (F & FF_REF) != 0

def hasName(F):
    """True if the item carries a user-defined name."""
    return (F & FF_NAME) != 0

def hasUserName(F):
    """True if the item has a user-defined name (not merely a dummy label)."""
    return (F & FF_ANYNAME) == FF_NAME
MS_0TYPE = ida_bytes.MS_0TYPE # Mask for 1st arg typing
FF_0VOID = ida_bytes.FF_0VOID # Void (unknown)?
FF_0NUMH = ida_bytes.FF_0NUMH # Hexadecimal number?
FF_0NUMD = ida_bytes.FF_0NUMD # Decimal number?
FF_0CHAR = ida_bytes.FF_0CHAR # Char ('x')?
FF_0SEG = ida_bytes.FF_0SEG # Segment?
FF_0OFF = ida_bytes.FF_0OFF # Offset?
FF_0NUMB = ida_bytes.FF_0NUMB # Binary number?
FF_0NUMO = ida_bytes.FF_0NUMO # Octal number?
FF_0ENUM = ida_bytes.FF_0ENUM # Enumeration?
FF_0FOP = ida_bytes.FF_0FOP # Forced operand?
FF_0STRO = ida_bytes.FF_0STRO # Struct offset?
FF_0STK = ida_bytes.FF_0STK # Stack variable?
MS_1TYPE = ida_bytes.MS_1TYPE # Mask for 2nd arg typing
FF_1VOID = ida_bytes.FF_1VOID # Void (unknown)?
FF_1NUMH = ida_bytes.FF_1NUMH # Hexadecimal number?
FF_1NUMD = ida_bytes.FF_1NUMD # Decimal number?
FF_1CHAR = ida_bytes.FF_1CHAR # Char ('x')?
FF_1SEG = ida_bytes.FF_1SEG # Segment?
FF_1OFF = ida_bytes.FF_1OFF # Offset?
FF_1NUMB = ida_bytes.FF_1NUMB # Binary number?
FF_1NUMO = ida_bytes.FF_1NUMO # Octal number?
FF_1ENUM = ida_bytes.FF_1ENUM # Enumeration?
FF_1FOP = ida_bytes.FF_1FOP # Forced operand?
FF_1STRO = ida_bytes.FF_1STRO # Struct offset?
FF_1STK = ida_bytes.FF_1STK # Stack variable?
# The following macros answer questions like
# 'is the 1st (or 2nd) operand of instruction or data of the given type'?
# Please note that data items use only the 1st operand type (is...0)
def is_defarg0(F):
    """True if the first operand has a defined representation."""
    return (F & MS_0TYPE) != FF_0VOID

def is_defarg1(F):
    """True if the second operand has a defined representation."""
    return (F & MS_1TYPE) != FF_1VOID

def isDec0(F):
    """True if the first operand is displayed as a decimal number."""
    return (F & MS_0TYPE) == FF_0NUMD

def isDec1(F):
    """True if the second operand is displayed as a decimal number."""
    return (F & MS_1TYPE) == FF_1NUMD

def isHex0(F):
    """True if the first operand is displayed as a hexadecimal number."""
    return (F & MS_0TYPE) == FF_0NUMH

def isHex1(F):
    """True if the second operand is displayed as a hexadecimal number."""
    return (F & MS_1TYPE) == FF_1NUMH

def isOct0(F):
    """True if the first operand is displayed as an octal number."""
    return (F & MS_0TYPE) == FF_0NUMO

def isOct1(F):
    """True if the second operand is displayed as an octal number."""
    return (F & MS_1TYPE) == FF_1NUMO

def isBin0(F):
    """True if the first operand is displayed as a binary number."""
    return (F & MS_0TYPE) == FF_0NUMB

def isBin1(F):
    """True if the second operand is displayed as a binary number."""
    return (F & MS_1TYPE) == FF_1NUMB

def is_off0(F):
    """True if the first operand is an offset."""
    return (F & MS_0TYPE) == FF_0OFF

def is_off1(F):
    """True if the second operand is an offset."""
    return (F & MS_1TYPE) == FF_1OFF

def is_char0(F):
    """True if the first operand is a character constant."""
    return (F & MS_0TYPE) == FF_0CHAR

def is_char1(F):
    """True if the second operand is a character constant."""
    return (F & MS_1TYPE) == FF_1CHAR

def is_seg0(F):
    """True if the first operand is a segment selector."""
    return (F & MS_0TYPE) == FF_0SEG

def is_seg1(F):
    """True if the second operand is a segment selector."""
    return (F & MS_1TYPE) == FF_1SEG

def is_enum0(F):
    """True if the first operand is an enumeration member."""
    return (F & MS_0TYPE) == FF_0ENUM

def is_enum1(F):
    """True if the second operand is an enumeration member."""
    return (F & MS_1TYPE) == FF_1ENUM

def is_manual0(F):
    """True if the first operand is a forced (manually entered) operand."""
    return (F & MS_0TYPE) == FF_0FOP

def is_manual1(F):
    """True if the second operand is a forced (manually entered) operand."""
    return (F & MS_1TYPE) == FF_1FOP

def is_stroff0(F):
    """True if the first operand is a structure offset."""
    return (F & MS_0TYPE) == FF_0STRO

def is_stroff1(F):
    """True if the second operand is a structure offset."""
    return (F & MS_1TYPE) == FF_1STRO

def is_stkvar0(F):
    """True if the first operand is a stack variable."""
    return (F & MS_0TYPE) == FF_0STK

def is_stkvar1(F):
    """True if the second operand is a stack variable."""
    return (F & MS_1TYPE) == FF_1STK
#
# Bits for DATA bytes
#
DT_TYPE = ida_bytes.DT_TYPE & 0xFFFFFFFF # Mask for DATA typing
FF_BYTE = ida_bytes.FF_BYTE & 0xFFFFFFFF # byte
FF_WORD = ida_bytes.FF_WORD & 0xFFFFFFFF # word
FF_DWORD = ida_bytes.FF_DWORD & 0xFFFFFFFF # dword
FF_QWORD = ida_bytes.FF_QWORD & 0xFFFFFFFF # qword
FF_TBYTE = ida_bytes.FF_TBYTE & 0xFFFFFFFF # tbyte
FF_STRLIT = ida_bytes.FF_STRLIT & 0xFFFFFFFF # ASCII ?
FF_STRUCT = ida_bytes.FF_STRUCT & 0xFFFFFFFF # Struct ?
FF_OWORD = ida_bytes.FF_OWORD & 0xFFFFFFFF # octaword (16 bytes)
FF_FLOAT = ida_bytes.FF_FLOAT & 0xFFFFFFFF # float
FF_DOUBLE = ida_bytes.FF_DOUBLE & 0xFFFFFFFF # double
FF_PACKREAL = ida_bytes.FF_PACKREAL & 0xFFFFFFFF # packed decimal real
FF_ALIGN = ida_bytes.FF_ALIGN & 0xFFFFFFFF # alignment directive
def is_byte(F):
    """True for a defined data item of type byte."""
    return is_data(F) and (F & DT_TYPE) == FF_BYTE

def is_word(F):
    """True for a defined data item of type word (2 bytes)."""
    return is_data(F) and (F & DT_TYPE) == FF_WORD

def is_dword(F):
    """True for a defined data item of type dword (4 bytes)."""
    return is_data(F) and (F & DT_TYPE) == FF_DWORD

def is_qword(F):
    """True for a defined data item of type qword (8 bytes)."""
    return is_data(F) and (F & DT_TYPE) == FF_QWORD

def is_oword(F):
    """True for a defined data item of type octaword (16 bytes)."""
    return is_data(F) and (F & DT_TYPE) == FF_OWORD

def is_tbyte(F):
    """True for a defined data item of type tbyte."""
    return is_data(F) and (F & DT_TYPE) == FF_TBYTE

def is_float(F):
    """True for a defined data item of type float."""
    return is_data(F) and (F & DT_TYPE) == FF_FLOAT

def is_double(F):
    """True for a defined data item of type double."""
    return is_data(F) and (F & DT_TYPE) == FF_DOUBLE

def is_pack_real(F):
    """True for a defined data item of packed-decimal-real type."""
    return is_data(F) and (F & DT_TYPE) == FF_PACKREAL

def is_strlit(F):
    """True for a defined string literal."""
    return is_data(F) and (F & DT_TYPE) == FF_STRLIT

def is_struct(F):
    """True for a defined structure instance."""
    return is_data(F) and (F & DT_TYPE) == FF_STRUCT

def is_align(F):
    """True for an alignment directive item."""
    return is_data(F) and (F & DT_TYPE) == FF_ALIGN
#
# Bits for CODE bytes
#
MS_CODE = ida_bytes.MS_CODE & 0xFFFFFFFF
FF_FUNC = ida_bytes.FF_FUNC & 0xFFFFFFFF # function start?
FF_IMMD = ida_bytes.FF_IMMD & 0xFFFFFFFF # Has Immediate value ?
FF_JUMP = ida_bytes.FF_JUMP & 0xFFFFFFFF # Has jump table
#
# Loader flags
#
if ida_idaapi.uses_swig_builtins:
_scope = ida_loader.loader_t
else:
_scope = ida_loader
NEF_SEGS = _scope.NEF_SEGS # Create segments
NEF_RSCS = _scope.NEF_RSCS # Load resources
NEF_NAME = _scope.NEF_NAME # Rename entries
NEF_MAN = _scope.NEF_MAN # Manual load
NEF_FILL = _scope.NEF_FILL # Fill segment gaps
NEF_IMPS = _scope.NEF_IMPS # Create imports section
NEF_FIRST = _scope.NEF_FIRST # This is the first file loaded
NEF_CODE = _scope.NEF_CODE # for load_binary_file:
NEF_RELOAD = _scope.NEF_RELOAD # reload the file at the same place:
NEF_FLAT = _scope.NEF_FLAT # Autocreated FLAT group (PE)
# List of built-in functions
# --------------------------
#
# The following conventions are used in this list:
# 'ea' is a linear address
# 'success' is 0 if a function failed, 1 otherwise
# 'void' means that function returns no meaningful value (always 0)
#
# All function parameter conversions are made automatically.
#
# ----------------------------------------------------------------------------
# M I S C E L L A N E O U S
# ----------------------------------------------------------------------------
def value_is_string(var):
    """IDC value-type query; unnecessary and unsupported in Python."""
    raise NotImplementedError("this function is not needed in Python")

def value_is_long(var):
    """IDC value-type query; unnecessary and unsupported in Python."""
    raise NotImplementedError("this function is not needed in Python")

def value_is_float(var):
    """IDC value-type query; unnecessary and unsupported in Python."""
    raise NotImplementedError("this function is not needed in Python")

def value_is_func(var):
    """IDC value-type query; unnecessary and unsupported in Python."""
    raise NotImplementedError("this function is not needed in Python")

def value_is_pvoid(var):
    """IDC value-type query; unnecessary and unsupported in Python."""
    raise NotImplementedError("this function is not needed in Python")

def value_is_int64(var):
    """IDC value-type query; unnecessary and unsupported in Python."""
    raise NotImplementedError("this function is not needed in Python")

def to_ea(seg, off):
    """Combine a 16-bit real-mode paragraph *seg* and *off* into a linear address."""
    return off + (seg << 4)

def form(format, *args):
    """Deprecated IDC helper; always raises DeprecatedIDCError."""
    raise DeprecatedIDCError("form() is deprecated. Use python string operations instead.")

def substr(s, x1, x2):
    """Deprecated IDC helper; always raises DeprecatedIDCError."""
    raise DeprecatedIDCError("substr() is deprecated. Use python string operations instead.")

def strstr(s1, s2):
    """Deprecated IDC helper; always raises DeprecatedIDCError."""
    raise DeprecatedIDCError("strstr() is deprecated. Use python string operations instead.")

def strlen(s):
    """Deprecated IDC helper; always raises DeprecatedIDCError."""
    raise DeprecatedIDCError("strlen() is deprecated. Use python string operations instead.")

def xtol(s):
    """Deprecated IDC helper; always raises DeprecatedIDCError."""
    raise DeprecatedIDCError("xtol() is deprecated. Use python long() instead.")
def atoa(ea):
    """Return the textual form (e.g. "seg:off") of linear address *ea*."""
    return ida_kernwin.ea2str(ea)
def ltoa(n, radix):
    """Deprecated IDC helper; always raises DeprecatedIDCError."""
    raise DeprecatedIDCError("ltoa() is deprecated. Use python string operations instead.")
def atol(s):
    """Deprecated IDC helper; always raises DeprecatedIDCError."""
    raise DeprecatedIDCError("atol() is deprecated. Use python long() instead.")
def rotate_left(value, count, nbits, offset):
    """Rotate a bit-field of *value* left by *count* positions.

    The field is the *nbits*-wide slice of *value* starting at bit
    *offset*; bits outside the field are preserved unchanged.  A
    negative *count* rotates right.  Returns the new value.
    """
    assert offset >= 0, "offset must be >= 0"
    assert nbits > 0, "nbits must be > 0"
    # Extract the field, rotate it in a single step, then splice it back.
    # Reducing count modulo nbits also maps right rotations (negative
    # count) onto the equivalent left rotation, so one code path covers
    # both directions.  This is O(1) instead of the previous
    # one-shift-per-bit loop, with identical results.
    fieldmask = (1 << nbits) - 1
    field = (value >> offset) & fieldmask
    count %= nbits
    if count:
        field = ((field << count) | (field >> (nbits - count))) & fieldmask
    return (value & ~(fieldmask << offset)) | (field << offset)

def rotate_dword(x, count):
    """Rotate a 32-bit value left by *count* bits (right if negative)."""
    return rotate_left(x, count, 32, 0)

def rotate_word(x, count):
    """Rotate a 16-bit value left by *count* bits (right if negative)."""
    return rotate_left(x, count, 16, 0)

def rotate_byte(x, count):
    """Rotate an 8-bit value left by *count* bits (right if negative)."""
    return rotate_left(x, count, 8, 0)
# add_idc_hotkey return codes
IDCHK_OK = 0 # ok
IDCHK_ARG = -1 # bad argument(s)
IDCHK_KEY = -2 # bad hotkey name
IDCHK_MAX = -3 # too many IDC hotkeys
add_idc_hotkey = ida_kernwin.add_idc_hotkey
del_idc_hotkey = ida_kernwin.del_idc_hotkey
jumpto = ida_kernwin.jumpto
auto_wait = ida_auto.auto_wait
def eval_idc(expr):
    """Evaluate IDC expression *expr* and return its value.

    Returns an int for numeric results and a str for string results.
    On evaluation failure a string beginning with "IDC_FAILURE: " is
    returned (see EVAL_FAILURE).  Other IDC result types raise
    NotImplementedError.
    """
    rv = ida_expr.idc_value_t()
    err = ida_expr.eval_idc_expr(rv, BADADDR, expr)
    if err:
        return "IDC_FAILURE: "+err
    else:
        # vtype is a one-character type tag of the IDC value
        if rv.vtype == '\x02': # long
            return rv.num
        elif rv.vtype == '\x07': # VT_STR
            return rv.c_str()
        else:
            raise NotImplementedError("eval_idc() supports only expressions returning strings or longs")
def EVAL_FAILURE(code):
    """Return True if *code* is an error string produced by eval_idc().

    Bug fix: eval_idc() returns failure messages as str (it builds
    "IDC_FAILURE: " + err), but this predicate previously required the
    value to be of type bytes, so genuine failures were never detected
    -- and a real bytes value would have raised TypeError from
    startswith() called with a str prefix.
    """
    return isinstance(code, str) and code.startswith("IDC_FAILURE: ")
def save_database(idbname, flags=0):
    """Save the current database to *idbname*.

    An empty *idbname* means "the current IDB path".  Only the
    DBFL_KILL/DBFL_COMP/DBFL_BAK bits of *flags* are honored.
    """
    if not len(idbname):
        idbname = get_idb_path()
    allowed = ida_loader.DBFL_KILL | ida_loader.DBFL_COMP | ida_loader.DBFL_BAK
    return ida_loader.save_database(idbname, flags & allowed)
DBFL_BAK = ida_loader.DBFL_BAK # for compatibility with older versions, eventually delete this
def validate_idb_names(do_repair = 0):
    """Validate database names; a non-zero *do_repair* also repairs them."""
    return ida_nalt.validate_idb_names(do_repair)
qexit = ida_pro.qexit
def call_system(command):
    """Run *command* through the system shell and return its exit status.

    NOTE(review): *command* is passed to the shell verbatim -- never
    feed it untrusted input (shell injection risk).
    """
    return os.system(command)
def qsleep(milliseconds):
    """Suspend execution for the given number of milliseconds."""
    time.sleep(float(milliseconds)/1000)
load_and_run_plugin = ida_loader.load_and_run_plugin
plan_to_apply_idasgn = ida_funcs.plan_to_apply_idasgn
#----------------------------------------------------------------------------
# C H A N G E P R O G R A M R E P R E S E N T A T I O N
#----------------------------------------------------------------------------
def delete_all_segments():
    """Wipe the database: remove names, comments, functions, hidden
    ranges and segments for every head in [min_ea, max_ea]."""
    ea = ida_ida.cvar.inf.min_ea
    # Brute-force nuke all info from all the heads
    while ea != BADADDR and ea <= ida_ida.cvar.inf.max_ea:
        ida_name.del_local_name(ea)
        ida_name.del_global_name(ea)
        func = ida_funcs.get_func(ea)
        if func:
            # clear both the non-repeatable and repeatable function comments
            ida_funcs.set_func_cmt(func, "", False)
            ida_funcs.set_func_cmt(func, "", True)
            ida_funcs.del_func(ea)
        ida_bytes.del_hidden_range(ea)
        seg = ida_segment.getseg(ea)
        if seg:
            # clear both segment comment kinds, then delete the segment
            # silently while keeping its bytes (SEGMOD_KEEP)
            ida_segment.set_segment_cmt(seg, "", False)
            ida_segment.set_segment_cmt(seg, "", True)
            ida_segment.del_segm(ea, ida_segment.SEGMOD_KEEP | ida_segment.SEGMOD_SILENT)
        ea = ida_bytes.next_head(ea, ida_ida.cvar.inf.max_ea)
create_insn = ida_ua.create_insn
def plan_and_wait(sEA, eEA, final_pass=True):
    """Schedule analysis of [sEA, eEA) and wait until autoanalysis finishes."""
    return ida_auto.plan_and_wait(sEA, eEA, final_pass)
def set_name(ea, name, flags=ida_name.SN_CHECK):
    """Rename address *ea* to *name* (*flags*: SN_* bits); returns success."""
    return ida_name.set_name(ea, name, flags)
SN_CHECK = ida_name.SN_CHECK
SN_NOCHECK = ida_name.SN_NOCHECK # Don't fail if the name contains invalid characters.
SN_PUBLIC = ida_name.SN_PUBLIC
SN_NON_PUBLIC = ida_name.SN_NON_PUBLIC
SN_WEAK = ida_name.SN_WEAK
SN_NON_WEAK = ida_name.SN_NON_WEAK
SN_AUTO = ida_name.SN_AUTO
SN_NON_AUTO = ida_name.SN_NON_AUTO
SN_NOLIST = ida_name.SN_NOLIST
SN_NOWARN = ida_name.SN_NOWARN
SN_LOCAL = ida_name.SN_LOCAL # create local name. a function should exist.
# local names can't be public or weak.
set_cmt = ida_bytes.set_cmt
def make_array(ea, nitems):
    """Convert the item at *ea* into an array of *nitems* elements.

    Fails (returns False) for code, tail and alignment items.  Unknown
    bytes are arrayed as byte-sized elements; structure items take
    their element size (and type id) from the item's type information.
    """
    flags = ida_bytes.get_flags(ea)
    if ida_bytes.is_code(flags) or ida_bytes.is_tail(flags) or ida_bytes.is_align(flags):
        return False
    if ida_bytes.is_unknown(flags):
        # treat unexplored bytes as plain byte elements
        flags = ida_bytes.FF_BYTE
    if ida_bytes.is_struct(flags):
        ti = ida_nalt.opinfo_t()
        assert ida_bytes.get_opinfo(ti, ea, 0, flags), "get_opinfo() failed"
        itemsize = ida_bytes.get_data_elsize(ea, flags, ti)
        tid = ti.tid
    else:
        itemsize = ida_bytes.get_item_size(ea)
        tid = BADADDR
    return ida_bytes.create_data(ea, flags, itemsize*nitems, tid)
def create_strlit(ea, endea):
    """Create a string literal at *ea* using the database's default string
    type; *endea* == BADADDR requests automatic length detection."""
    return ida_bytes.create_strlit(ea, 0 if endea == BADADDR else endea - ea, get_inf_attr(INF_STRTYPE))
create_data = ida_bytes.create_data
def create_byte(ea):
    """Define a 1-byte data item at *ea*."""
    return ida_bytes.create_byte(ea, 1)

def create_word(ea):
    """Define a 2-byte (word) data item at *ea*."""
    return ida_bytes.create_word(ea, 2)

def create_dword(ea):
    """Define a 4-byte (dword) data item at *ea*."""
    return ida_bytes.create_dword(ea, 4)

def create_qword(ea):
    """Define an 8-byte (qword) data item at *ea*."""
    return ida_bytes.create_qword(ea, 8)

def create_oword(ea):
    """Define a 16-byte (octaword) data item at *ea*."""
    return ida_bytes.create_oword(ea, 16)

def create_yword(ea):
    """Define a 32-byte (ymm-word) data item at *ea*."""
    return ida_bytes.create_yword(ea, 32)

def create_float(ea):
    """Define a 4-byte float at *ea*."""
    return ida_bytes.create_float(ea, 4)

def create_double(ea):
    """Define an 8-byte double at *ea*."""
    return ida_bytes.create_double(ea, 8)

def create_pack_real(ea):
    """Define a packed-decimal real at *ea*; the size is processor specific."""
    return ida_bytes.create_packed_real(ea, ida_idp.ph_get_tbyte_size())

def create_tbyte(ea):
    """Define a tbyte at *ea*; the size is processor specific."""
    return ida_bytes.create_tbyte(ea, ida_idp.ph_get_tbyte_size())

def create_struct(ea, size, strname):
    """Define an instance of structure *strname* at *ea*.

    A *size* of -1 means "use the structure's declared size".
    """
    sid = ida_struct.get_struc_id(strname)
    if size == -1:
        size = ida_struct.get_struc_size(sid)
    return ida_bytes.create_struct(ea, size, sid)
create_custom_data = ida_bytes.create_custdata
create_align = ida_bytes.create_align
def define_local_var(start, end, location, name):
    """Create a local variable named *name* in the function containing *start*.

    *location* is either "[reg+off]"/"[reg-off]" for a stack variable
    or a plain register name for a register variable (live in
    [start, end)).  Returns non-zero on success, 0 on failure.

    Fix: the pattern is now a raw string -- "\\[" inside a plain string
    literal is an invalid escape sequence (DeprecationWarning today, a
    SyntaxError in future Python versions).
    """
    func = ida_funcs.get_func(start)
    if not func:
        return 0
    # Find out if location is in the [bp+xx] form
    r = re.compile(r"\[([a-z]+)([-+][0-9a-fx]+)", re.IGNORECASE)
    m = r.match(location)
    if m:
        # Location in the form of [bp+xx]
        register = ida_idp.str2reg(m.group(1))
        if register == -1:
            return 0
        # base 0 lets int() honor the 0x prefix and the sign
        offset = int(m.group(2), 0)
        return 1 if ida_frame.define_stkvar(func, name, offset, ida_bytes.byte_flag(), None, 1) else 0
    else:
        # Location as simple register name
        return ida_frame.add_regvar(func, start, end, location, name, None)
del_items = ida_bytes.del_items
DELIT_SIMPLE = ida_bytes.DELIT_SIMPLE # simply undefine the specified item
DELIT_EXPAND = ida_bytes.DELIT_EXPAND # propagate undefined items, for example
# if removing an instruction removes all
# references to the next instruction, then
# plan to convert to unexplored the next
# instruction too.
DELIT_DELNAMES = ida_bytes.DELIT_DELNAMES # delete any names at the specified address(es)
def set_array_params(ea, flags, litems, align):
return eval_idc("set_array_params(0x%X, 0x%X, %d, %d)"%(ea, flags, litems, align))
AP_ALLOWDUPS = 0x00000001 # use 'dup' construct
AP_SIGNED = 0x00000002 # treats numbers as signed
AP_INDEX = 0x00000004 # display array element indexes as comments
AP_ARRAY = 0x00000008 # reserved (this flag is not stored in database)
AP_IDXBASEMASK = 0x000000F0 # mask for number base of the indexes
AP_IDXDEC = 0x00000000 # display indexes in decimal
AP_IDXHEX = 0x00000010 # display indexes in hex
AP_IDXOCT = 0x00000020 # display indexes in octal
AP_IDXBIN = 0x00000030 # display indexes in binary
op_bin = ida_bytes.op_bin
op_oct = ida_bytes.op_oct
op_dec = ida_bytes.op_dec
op_hex = ida_bytes.op_hex
op_chr = ida_bytes.op_chr
def op_plain_offset(ea, n, base):
    """Show operand *n* at *ea* as an offset from *base*; a *base* of
    BADADDR clears the operand representation instead."""
    if base == BADADDR:
        return ida_bytes.clr_op_type(ea, n)
    return ida_offset.op_plain_offset(ea, n, base)
OPND_OUTER = ida_bytes.OPND_OUTER # outer offset base
op_offset = ida_offset.op_offset
REF_OFF8 = ida_nalt.REF_OFF8 # 8bit full offset
REF_OFF16 = ida_nalt.REF_OFF16 # 16bit full offset
REF_OFF32 = ida_nalt.REF_OFF32 # 32bit full offset
REF_LOW8 = ida_nalt.REF_LOW8 # low 8bits of 16bit offset
REF_LOW16 = ida_nalt.REF_LOW16 # low 16bits of 32bit offset
REF_HIGH8 = ida_nalt.REF_HIGH8 # high 8bits of 16bit offset
REF_HIGH16 = ida_nalt.REF_HIGH16 # high 16bits of 32bit offset
REF_OFF64 = ida_nalt.REF_OFF64 # 64bit full offset
REFINFO_RVA = 0x10 # based reference (rva)
REFINFO_PASTEND = 0x20 # reference past an item; it may point to a nonexisting address
# do not destroy alignment dirs
REFINFO_NOBASE = 0x80 # offset base is a number
                                # the base can be any value
# nb: base xrefs are created only if base
# points to the middle of a segment
REFINFO_SUBTRACT = 0x0100 # the reference value is subtracted from
# the base value instead of (as usual)
# being added to it
REFINFO_SIGNEDOP = 0x0200 # the operand value is sign-extended (only
# supported for REF_OFF8/16/32/64)
op_seg = ida_bytes.op_seg
op_num = ida_bytes.op_num
op_flt = ida_bytes.op_flt
op_man = ida_bytes.set_forced_operand
toggle_sign = ida_bytes.toggle_sign
def toggle_bnot(ea, n):
    """Toggle the "bitwise negate" display of operand *n* at *ea*.

    Always returns True (the underlying call's result is discarded).
    """
    ida_bytes.toggle_bnot(ea, n)
    return True
op_enum = ida_bytes.op_enum
def op_stroff(ea, n, strid, delta):
    """Represent operand *n* as an offset within structure *strid* (+ *delta*).

    *ea* may be a linear address (the instruction is decoded first) or
    an already-decoded ida_ua.insn_t.
    """
    # single-element structure path holding the target struct id
    path = ida_pro.tid_array(1)
    path[0] = strid
    if isinstance(ea, ida_ua.insn_t):
        insn = ea
    else:
        insn = ida_ua.insn_t()
        ida_ua.decode_insn(insn, ea)
    return ida_bytes.op_stroff(insn, n, path.cast(), 1, delta)
op_stkvar = ida_bytes.op_stkvar
def op_offset_high16(ea, n, target):
    """Show operand *n* at *ea* as the high 16 bits of an offset to *target*."""
    return ida_offset.op_offset(ea, n, ida_nalt.REF_HIGH16, target)
def MakeVar(ea):
    # Intentional no-op, kept only for backward source compatibility.
    pass
# Every anterior/posterior line has its number.
# Anterior lines have numbers from E_PREV
# Posterior lines have numbers from E_NEXT
E_PREV = ida_lines.E_PREV
E_NEXT = ida_lines.E_NEXT
get_extra_cmt = ida_lines.get_extra_cmt
update_extra_cmt = ida_lines.update_extra_cmt
del_extra_cmt = ida_lines.del_extra_cmt
set_manual_insn = ida_bytes.set_manual_insn
get_manual_insn = ida_bytes.get_manual_insn
patch_dbg_byte = ida_dbg.put_dbg_byte
patch_byte = ida_bytes.patch_byte
patch_word = ida_bytes.patch_word
patch_dword = ida_bytes.patch_dword
patch_qword = ida_bytes.patch_qword
SR_inherit = 1 # value is inherited from the previous range
SR_user = 2 # value is specified by the user
SR_auto = 3 # value is determined by IDA
SR_autostart = 4 # as SR_auto for segment starting address
def split_sreg_range(ea, reg, value, tag=SR_user):
    """Set segment register *reg* (given by name) to *value* starting at *ea*.

    *tag* is one of the SR_* constants.  Returns False when the
    register name is unknown.
    """
    regno = ida_idp.str2reg(reg)
    if regno < 0:
        return False
    return ida_segregs.split_sreg_range(ea, regno, value, tag)
auto_mark_range = ida_auto.auto_mark_range
auto_unmark = ida_auto.auto_unmark
def AutoMark(ea,qtype):
    """Queue the single address *ea* for autoanalysis of kind *qtype* (AU_*)."""
    return auto_mark_range(ea,ea+1,qtype)
AU_UNK = ida_auto.AU_UNK # make unknown
AU_CODE = ida_auto.AU_CODE # convert to instruction
AU_PROC = ida_auto.AU_PROC # make function
AU_USED = ida_auto.AU_USED # reanalyze
AU_LIBF = ida_auto.AU_LIBF # apply a flirt signature (the current signature!)
AU_FINAL = ida_auto.AU_FINAL # coagulate unexplored items
#----------------------------------------------------------------------------
# P R O D U C E O U T P U T F I L E S
#----------------------------------------------------------------------------
def gen_file(filetype, path, ea1, ea2, flags):
    """Generate an output file of kind *filetype* (OFILE_*) for [ea1, ea2).

    Returns the value of ida_loader.gen_file() (number of produced
    lines), or -1 when *path* cannot be opened for writing.

    Fix: the file handle is now closed in a finally block, so it is no
    longer leaked when ida_loader.gen_file() raises.
    """
    f = ida_diskio.fopenWB(path)
    if not f:
        return -1
    try:
        return ida_loader.gen_file(filetype, f, ea1, ea2, flags)
    finally:
        ida_diskio.eclose(f)
# output file types:
OFILE_MAP = ida_loader.OFILE_MAP
OFILE_EXE = ida_loader.OFILE_EXE
OFILE_IDC = ida_loader.OFILE_IDC
OFILE_LST = ida_loader.OFILE_LST
OFILE_ASM = ida_loader.OFILE_ASM
OFILE_DIF = ida_loader.OFILE_DIF
# output control flags:
GENFLG_MAPSEG = ida_loader.GENFLG_MAPSEG # map: generate map of segments
GENFLG_MAPNAME = ida_loader.GENFLG_MAPNAME # map: include dummy names
GENFLG_MAPDMNG = ida_loader.GENFLG_MAPDMNG # map: demangle names
GENFLG_MAPLOC = ida_loader.GENFLG_MAPLOC # map: include local names
GENFLG_IDCTYPE = ida_loader.GENFLG_IDCTYPE # idc: gen only information about types
GENFLG_ASMTYPE = ida_loader.GENFLG_ASMTYPE # asm&lst: gen information about types too
GENFLG_GENHTML = ida_loader.GENFLG_GENHTML # asm&lst: generate html (gui version only)
GENFLG_ASMINC = ida_loader.GENFLG_ASMINC # asm&lst: gen information only about types
def gen_flow_graph(outfile, title, ea1, ea2, flags):
    """Generate a flow-chart file for [ea1, ea2); *flags* are CHART_* bits."""
    return ida_gdl.gen_flow_graph(outfile, title, None, ea1, ea2, flags)
CHART_PRINT_NAMES = 0x1000 # print labels for each block?
CHART_GEN_GDL = 0x4000 # generate .gdl file (file extension is forced to .gdl)
CHART_WINGRAPH = 0x8000 # call wingraph32 to display the graph
CHART_NOLIBFUNCS = 0x0400 # don't include library functions in the graph
def gen_simple_call_chart(outfile, title, flags):
    """Generate a call-graph chart file titled *title*; *flags* are CHART_* bits."""
    return ida_gdl.gen_simple_call_chart(outfile, "Generating chart", title, flags)
def idadir():
    """Return the directory where the IDA executable resides."""
    return ida_diskio.idadir("")
get_root_filename = ida_nalt.get_root_filename
get_input_file_path = ida_nalt.get_input_file_path
set_root_filename = ida_nalt.set_root_filename
def get_idb_path():
    """Return the full path of the currently open IDB file."""
    return ida_loader.get_path(ida_loader.PATH_TYPE_IDB)
retrieve_input_file_md5 = ida_nalt.retrieve_input_file_md5
get_full_flags = ida_bytes.get_full_flags
get_db_byte = ida_bytes.get_db_byte
def get_bytes(ea, size, use_dbg = False):
    """Return *size* bytes at *ea*, read from debugger memory when
    *use_dbg* is set, otherwise from the database."""
    reader = ida_idd.dbg_read_memory if use_dbg else ida_bytes.get_bytes
    return reader(ea, size)
get_wide_byte = ida_bytes.get_wide_byte
def __DbgValue(ea, len):
    """Read an unsigned integer of *len* bytes from debugger memory at *ea*.

    Honors the database endianness.  Returns None for unsupported sizes
    or unreadable memory.
    """
    entry = ida_idaapi.__struct_unpack_table.get(len)
    if entry is None:
        return None
    raw = ida_idd.dbg_read_memory(ea, len)
    if raw is None:
        return None
    byteorder = ">" if ida_ida.cvar.inf.is_be() else "<"
    return struct.unpack(byteorder + entry[1], raw)[0]

def read_dbg_byte(ea):
    """Read one byte from debugger memory at *ea* (None on failure)."""
    return __DbgValue(ea, 1)

def read_dbg_word(ea):
    """Read a 2-byte value from debugger memory at *ea* (None on failure)."""
    return __DbgValue(ea, 2)

def read_dbg_dword(ea):
    """Read a 4-byte value from debugger memory at *ea* (None on failure)."""
    return __DbgValue(ea, 4)

def read_dbg_qword(ea):
    """Read an 8-byte value from debugger memory at *ea* (None on failure)."""
    return __DbgValue(ea, 8)
read_dbg_memory = ida_idd.dbg_read_memory
def write_dbg_memory(ea, data):
    """Write *data* to debugger memory at *ea*.

    Returns the number of bytes written, or -1 when the debugger cannot
    be queried.

    Fix: an empty *data* previously fell off the end of the function
    and returned None; it now explicitly reports 0 bytes written.
    """
    __warn_once_deprecated_proto_confusion("write_dbg_memory", "ida_dbg.write_dbg_memory")
    if not ida_dbg.dbg_can_query():
        return -1
    if len(data) > 0:
        return ida_idd.dbg_write_memory(ea, data)
    return 0
get_original_byte = ida_bytes.get_original_byte
get_wide_word = ida_bytes.get_wide_word
get_wide_dword = ida_bytes.get_wide_dword
get_qword = ida_bytes.get_qword
def GetFloat(ea):
    """Reinterpret the dword at *ea* as an IEEE-754 single-precision float.

    NOTE(review): pack/unpack use native byte order rather than the
    database's endianness -- confirm behavior on big-endian targets.
    """
    raw = struct.pack("I", get_wide_dword(ea))
    return struct.unpack("f", raw)[0]

def GetDouble(ea):
    """Reinterpret the qword at *ea* as an IEEE-754 double-precision float.

    NOTE(review): pack/unpack use native byte order rather than the
    database's endianness -- confirm behavior on big-endian targets.
    """
    raw = struct.pack("Q", get_qword(ea))
    return struct.unpack("d", raw)[0]
def get_name_ea_simple(name):
    """Return the address of global *name*, or BADADDR when it does not exist."""
    return ida_name.get_name_ea(BADADDR, name)
get_name_ea = ida_name.get_name_ea
def get_segm_by_sel(base):
    """Return the start address of the segment whose selector/paragraph is
    *base*, or BADADDR when no such segment exists."""
    seg = ida_segment.get_segm_by_sel(ida_segment.find_selector(base))
    return seg.start_ea if seg else BADADDR
get_screen_ea = ida_kernwin.get_screen_ea
def get_curline():
    """Return the current disassembly line with color tags stripped."""
    return ida_lines.tag_remove(ida_kernwin.get_curline())

def read_selection_start():
    """Return the start address of the current selection, or BADADDR if none."""
    ok, start, _end = ida_kernwin.read_range_selection(None)
    return start if ok == 1 else BADADDR

def read_selection_end():
    """Return the end address of the current selection, or BADADDR if none."""
    ok, _start, end = ida_kernwin.read_range_selection(None)
    return end if ok == 1 else BADADDR
def get_sreg(ea, reg):
    """Return the value of segment register *reg* (given by name) at *ea*,
    or -1 when the register name is unknown."""
    regno = ida_idp.str2reg(reg)
    if regno < 0:
        return -1
    return ida_segregs.get_sreg(ea, regno)
next_addr = ida_bytes.next_addr
prev_addr = ida_bytes.prev_addr
def next_head(ea, maxea=BADADDR):
    """Return the next defined item head after *ea*, bounded by *maxea* (BADADDR if none)."""
    return ida_bytes.next_head(ea, maxea)
def prev_head(ea, minea=0):
    """Return the previous defined item head before *ea*, bounded by *minea* (BADADDR if none)."""
    return ida_bytes.prev_head(ea, minea)
next_not_tail = ida_bytes.next_not_tail
prev_not_tail = ida_bytes.prev_not_tail
get_item_head = ida_bytes.get_item_head
get_item_end = ida_bytes.get_item_end
def get_item_size(ea):
    """Return the size in bytes of the item at *ea*."""
    return ida_bytes.get_item_end(ea) - ea

def func_contains(func_ea, ea):
    """True if *ea* belongs to the function that contains *func_ea*."""
    func = ida_funcs.get_func(func_ea)
    if not func:
        return False
    return ida_funcs.func_contains(func, ea)
GN_VISIBLE = ida_name.GN_VISIBLE
GN_COLORED = ida_name.GN_COLORED
GN_DEMANGLED = ida_name.GN_DEMANGLED
GN_STRICT = ida_name.GN_STRICT
GN_SHORT = ida_name.GN_SHORT
GN_LONG = ida_name.GN_LONG
GN_LOCAL = ida_name.GN_LOCAL
GN_ISRET = ida_name.GN_ISRET
GN_NOT_ISRET = ida_name.GN_NOT_ISRET
calc_gtn_flags = ida_name.calc_gtn_flags
def get_name(ea, gtn_flags=0):
    """Return the name at *ea* (*gtn_flags*: GN_* bits)."""
    return ida_name.get_ea_name(ea, gtn_flags)

def demangle_name(name, disable_mask):
    """Demangle *name* in full form; *disable_mask* selects inhibitor bits."""
    return ida_name.demangle_name(name, disable_mask, ida_name.DQT_FULL)

def generate_disasm_line(ea, flags):
    """Return the disassembly text at *ea* without color tags ("" on failure).

    *flags* are GENDSM_* bits.
    """
    text = ida_lines.generate_disasm_line(ea, flags)
    return ida_lines.tag_remove(text) if text else ""
GENDSM_FORCE_CODE = ida_lines.GENDSM_FORCE_CODE
GENDSM_MULTI_LINE = ida_lines.GENDSM_MULTI_LINE
def GetDisasm(ea):
    """Return the plain (tag-free) disassembly line at *ea*."""
    return generate_disasm_line(ea, 0)

def print_insn_mnem(ea):
    """Return the instruction mnemonic at *ea*, or "" when undecodable."""
    return ida_ua.ua_mnem(ea) or ""

def print_operand(ea, n):
    """Return the text of operand *n* at *ea*, color tags stripped ("" on failure)."""
    text = ida_ua.print_operand(ea, n)
    return ida_lines.tag_remove(text) if text else ""

def get_operand_type(ea, n):
    """Return the o_* type of operand *n* at *ea*, or -1 when the
    instruction cannot be decoded."""
    insn = ida_ua.insn_t()
    if ida_ua.decode_insn(insn, ea) == 0:
        return -1
    return insn.ops[n].type
o_void = ida_ua.o_void
o_reg = ida_ua.o_reg
o_mem = ida_ua.o_mem
o_phrase = ida_ua.o_phrase
o_displ = ida_ua.o_displ
o_imm = ida_ua.o_imm
o_far = ida_ua.o_far
o_near = ida_ua.o_near
o_idpspec0 = ida_ua.o_idpspec0
o_idpspec1 = ida_ua.o_idpspec1
o_idpspec2 = ida_ua.o_idpspec2
o_idpspec3 = ida_ua.o_idpspec3
o_idpspec4 = ida_ua.o_idpspec4
o_idpspec5 = ida_ua.o_idpspec5
o_trreg = ida_ua.o_idpspec0
o_dbreg = ida_ua.o_idpspec1
o_crreg = ida_ua.o_idpspec2
o_fpreg = ida_ua.o_idpspec3
o_mmxreg = ida_ua.o_idpspec4
o_xmmreg = ida_ua.o_idpspec5
o_reglist = ida_ua.o_idpspec1
o_creglist = ida_ua.o_idpspec2
o_creg = ida_ua.o_idpspec3
o_fpreglist = ida_ua.o_idpspec4
o_text = ida_ua.o_idpspec5
o_cond = (ida_ua.o_idpspec5+1)
o_spr = ida_ua.o_idpspec0
o_twofpr = ida_ua.o_idpspec1
o_shmbme = ida_ua.o_idpspec2
o_crf = ida_ua.o_idpspec3
o_crb = ida_ua.o_idpspec4
o_dcr = ida_ua.o_idpspec5
def get_operand_value(ea, n):
    """Return the value of operand *n* of the instruction at *ea*.

    Depending on the operand type this is an address (o_mem / o_far /
    o_near / o_displ), a register number (o_reg), an immediate (o_imm)
    or a phrase number (o_phrase); -1 for any other type or when the
    instruction cannot be decoded.
    """
    insn = ida_ua.insn_t()
    if ida_ua.decode_insn(insn, ea) == 0:
        return -1
    op = insn.ops[n]
    if not op:
        return -1
    if op.type in (ida_ua.o_mem, ida_ua.o_far, ida_ua.o_near, ida_ua.o_displ):
        return op.addr
    if op.type == ida_ua.o_reg:
        return op.reg
    if op.type == ida_ua.o_imm:
        return op.value
    if op.type == ida_ua.o_phrase:
        return op.phrase
    return -1
GetCommentEx = ida_bytes.get_cmt
get_cmt = GetCommentEx
get_forced_operand = ida_bytes.get_forced_operand
BPU_1B = ida_nalt.BPU_1B
BPU_2B = ida_nalt.BPU_2B
BPU_4B = ida_nalt.BPU_4B
STRWIDTH_1B = ida_nalt.STRWIDTH_1B
STRWIDTH_2B = ida_nalt.STRWIDTH_2B
STRWIDTH_4B = ida_nalt.STRWIDTH_4B
STRWIDTH_MASK = ida_nalt.STRWIDTH_MASK
STRLYT_TERMCHR = ida_nalt.STRLYT_TERMCHR
STRLYT_PASCAL1 = ida_nalt.STRLYT_PASCAL1
STRLYT_PASCAL2 = ida_nalt.STRLYT_PASCAL2
STRLYT_PASCAL4 = ida_nalt.STRLYT_PASCAL4
STRLYT_MASK = ida_nalt.STRLYT_MASK
STRLYT_SHIFT = ida_nalt.STRLYT_SHIFT
STRTYPE_TERMCHR = ida_nalt.STRTYPE_TERMCHR
STRTYPE_C = ida_nalt.STRTYPE_C
STRTYPE_C_16 = ida_nalt.STRTYPE_C_16
STRTYPE_C_32 = ida_nalt.STRTYPE_C_32
STRTYPE_PASCAL = ida_nalt.STRTYPE_PASCAL
STRTYPE_PASCAL_16 = ida_nalt.STRTYPE_PASCAL_16
STRTYPE_LEN2 = ida_nalt.STRTYPE_LEN2
STRTYPE_LEN2_16 = ida_nalt.STRTYPE_LEN2_16
STRTYPE_LEN4 = ida_nalt.STRTYPE_LEN4
STRTYPE_LEN4_16 = ida_nalt.STRTYPE_LEN4_16
STRTYPE_C16 = STRTYPE_C_16
def get_strlit_contents(ea, length = -1, strtype = STRTYPE_C):
    """Return the bytes of the string literal at *ea*.

    A *length* of -1 asks IDA to measure the maximum literal length for
    the given *strtype* first.
    """
    if length == -1:
        length = ida_bytes.get_max_strlit_length(ea, strtype, ida_bytes.ALOPT_IGNHEADS)
    return ida_bytes.get_strlit_contents(ea, length, strtype)

def get_str_type(ea):
    """Return the string type (STRTYPE_*) of the literal at *ea*, else None."""
    flags = ida_bytes.get_flags(ea)
    if not ida_bytes.is_strlit(flags):
        return None
    oi = ida_nalt.opinfo_t()
    if ida_bytes.get_opinfo(oi, ea, 0, flags):
        return oi.strtype
    return None
find_suspop = ida_search.find_suspop
find_code = ida_search.find_code
find_data = ida_search.find_data
find_unknown = ida_search.find_unknown
find_defined = ida_search.find_defined
find_imm = ida_search.find_imm
SEARCH_UP = ida_search.SEARCH_UP
SEARCH_DOWN = ida_search.SEARCH_DOWN
SEARCH_NEXT = ida_search.SEARCH_NEXT
SEARCH_CASE = ida_search.SEARCH_CASE
SEARCH_REGEX = ida_search.SEARCH_REGEX
SEARCH_NOBRK = ida_search.SEARCH_NOBRK
SEARCH_NOSHOW = ida_search.SEARCH_NOSHOW # don't display the search progress
def find_text(ea, flag, y, x, searchstr):
    """Search disassembly text for *searchstr* from line *y*, column *x* at *ea*.

    Deprecated: the argument order differs from ida_search.find_text().
    """
    __warn_once_deprecated_proto_confusion("find_text", "ida_search.find_text")
    return ida_search.find_text(ea, y, x, searchstr, flag)
def find_binary(ea, flag, searchstr, radix=16):
    """Search for the byte pattern *searchstr* (parsed with *radix*)
    starting at *ea*; the SEARCH_DOWN bit in *flag* selects the bound
    (max_ea for downward, min_ea for upward searches).

    Deprecated: the argument order differs from ida_search.find_binary().

    Fix: the bound is now picked with a conditional expression instead
    of the fragile "cond and a or b" idiom, which silently yields the
    wrong alternative whenever the first value is falsy (e.g. a zero
    address).
    """
    __warn_once_deprecated_proto_confusion("find_binary", "ida_search.find_binary")
    endea = ida_ida.cvar.inf.max_ea if flag & 1 else ida_ida.cvar.inf.min_ea
    return ida_search.find_binary(ea, endea, searchstr, radix, flag)
def process_config_line(directive):
    """Apply one ida.cfg-style configuration *directive* (evaluated via IDC)."""
    return eval_idc('process_config_directive("%s")' % ida_kernwin.str2user(directive))
INF_VERSION = 0
INF_PROCNAME = 1
INF_GENFLAGS = 2
INF_LFLAGS = 3
INF_DATABASE_CHANGE_COUNT= 4
INF_CHANGE_COUNTER=INF_DATABASE_CHANGE_COUNT
INF_FILETYPE = 5
FT_EXE_OLD = 0
FT_COM_OLD = 1
FT_BIN = 2
FT_DRV = 3
FT_WIN = 4
FT_HEX = 5
FT_MEX = 6
FT_LX = 7
FT_LE = 8
FT_NLM = 9
FT_COFF = 10
FT_PE = 11
FT_OMF = 12
FT_SREC = 13
FT_ZIP = 14
FT_OMFLIB = 15
FT_AR = 16
FT_LOADER = 17
FT_ELF = 18
FT_W32RUN = 19
FT_AOUT = 20
FT_PRC = 21
FT_EXE = 22
FT_COM = 23
FT_AIXAR = 24
FT_MACHO = 25
INF_OSTYPE = 6
OSTYPE_MSDOS= 0x0001
OSTYPE_WIN = 0x0002
OSTYPE_OS2 = 0x0004
OSTYPE_NETW = 0x0008
INF_APPTYPE = 7
APPT_CONSOLE= 0x0001
APPT_GRAPHIC= 0x0002
APPT_PROGRAM= 0x0004
APPT_LIBRARY= 0x0008
APPT_DRIVER = 0x0010
APPT_1THREAD= 0x0020
APPT_MTHREAD= 0x0040
APPT_16BIT = 0x0080
APPT_32BIT = 0x0100
INF_ASMTYPE = 8
INF_SPECSEGS = 9
INF_AF = 10
def _import_module_flag_sets(module, prefixes):
    """Copy every integer constant of *module* whose name starts with one
    of *prefixes* (a string or list of strings) into this module's globals."""
    if isinstance(prefixes, str):
        prefixes = [prefixes]
    target = globals()
    for prefix in prefixes:
        for key in dir(module):
            if not key.startswith(prefix):
                continue
            value = getattr(module, key)
            if isinstance(value, ida_idaapi.integer_types):
                target[key] = value
_import_module_flag_sets(
ida_ida,
[
"INFFL_",
"LFLG_",
"IDB_",
"AF_",
"AF2_",
"SW_",
"NM_",
"DEMNAM_",
"LN_",
"OFLG_",
"SCF_",
"LMT_",
"PREF_",
"STRF_",
"ABI_",
])
INF_AF2 = 11
INF_BASEADDR = 12
INF_START_SS = 13
INF_START_CS = 14
INF_START_IP = 15
INF_START_EA = 16
INF_START_SP = 17
INF_MAIN = 18
INF_MIN_EA = 19
INF_MAX_EA = 20
INF_OMIN_EA = 21
INF_OMAX_EA = 22
INF_LOWOFF = 23
INF_LOW_OFF=INF_LOWOFF
INF_HIGHOFF = 24
INF_HIGH_OFF=INF_HIGHOFF
INF_MAXREF = 25
INF_PRIVRANGE_START_EA = 27
INF_START_PRIVRANGE=INF_PRIVRANGE_START_EA
INF_PRIVRANGE_END_EA = 28
INF_END_PRIVRANGE=INF_PRIVRANGE_END_EA
INF_NETDELTA = 29
INF_XREFNUM = 30
INF_TYPE_XREFNUM = 31 # char; Number of references to generate
# in the struct & enum windows
# 0 - xrefs won't be generated at all
INF_TYPE_XREFS=INF_TYPE_XREFNUM
INF_REFCMTNUM = 32
# generated at all
INF_REFCMTS=INF_REFCMTNUM
INF_XREFFLAG = 33 # char; xrefs representation:
INF_XREFS=INF_XREFFLAG
# NAMES
INF_MAX_AUTONAME_LEN = 34 # ushort; max name length (without zero byte)
INF_NAMETYPE = 35 # char; dummy names representation type
# --- Database-attribute (idainfo field) codes, for get_inf_attr()/set_inf_attr().
# Each code maps to an (getter, setter) pair in _INF_attrs_accessors below.
INF_SHORT_DEMNAMES = 36 # int32; short form of demangled names
INF_SHORT_DN=INF_SHORT_DEMNAMES
INF_LONG_DEMNAMES = 37 # int32; long form of demangled names
# see demangle.h for definitions
INF_LONG_DN=INF_LONG_DEMNAMES
INF_DEMNAMES = 38 # char; display demangled names as:
INF_LISTNAMES = 39 # uchar; What names should be included in the list?
# DISASSEMBLY LISTING DETAILS
INF_INDENT = 40 # char; Indention for instructions
INF_CMT_INDENT = 41 # char; Indention for comments
INF_COMMENT = 41 # for compatibility
INF_MARGIN = 42 # ushort; max length of data lines
INF_LENXREF = 43 # ushort; max length of line with xrefs
INF_OUTFLAGS = 44 # uint32; output flags
INF_CMTFLG = 45 # char; comments:
INF_CMTFLAG=INF_CMTFLG
INF_LIMITER = 46 # char; Generate borders?
INF_BORDER=INF_LIMITER
INF_BIN_PREFIX_SIZE = 47 # short; # of instruction bytes to show
# in line prefix
INF_BINPREF=INF_BIN_PREFIX_SIZE
INF_PREFFLAG = 48 # char; line prefix type:
# STRING LITERALS
INF_STRLIT_FLAGS= 49 # uchar; string literal flags
INF_STRLIT_BREAK= 50 # char; string literal line break symbol
INF_STRLIT_ZEROES= 51 # char; leading zeroes
INF_STRTYPE = 52 # int32; current ascii string type
# is considered as several bytes:
# low byte:
INF_STRLIT_PREF = 53 # char[16];ASCII names prefix
INF_STRLIT_SERNUM= 54 # uint32; serial number
# DATA ITEMS
INF_DATATYPES = 55 # int32; data types allowed in data carousel
# COMPILER
INF_CC_ID = 57 # uchar; compiler
COMP_MASK = 0x0F # mask to apply to get the pure compiler id
COMP_UNK = 0x00 # Unknown
COMP_MS = 0x01 # Visual C++
COMP_BC = 0x02 # Borland C++
COMP_WATCOM = 0x03 # Watcom C++
COMP_GNU = 0x06 # GNU C++
COMP_VISAGE = 0x07 # Visual Age C++
COMP_BP = 0x08 # Delphi
INF_CC_CM = 58 # uchar; memory model & calling convention
INF_CC_SIZE_I = 59 # uchar; sizeof(int)
INF_CC_SIZE_B = 60 # uchar; sizeof(bool)
INF_CC_SIZE_E = 61 # uchar; sizeof(enum)
INF_CC_DEFALIGN = 62 # uchar; default alignment
INF_CC_SIZE_S = 63 # uchar; sizeof(short)
INF_CC_SIZE_L = 64 # uchar; sizeof(long)
INF_CC_SIZE_LL = 65 # uchar; sizeof(long long)
INF_CC_SIZE_LDBL = 66 # uchar; sizeof(long double)
# Legacy aliases for the INF_CC_* codes above.
INF_COMPILER = INF_CC_ID
INF_MODEL = INF_CC_CM
INF_SIZEOF_INT = INF_CC_SIZE_I
INF_SIZEOF_BOOL = INF_CC_SIZE_B
INF_SIZEOF_ENUM = INF_CC_SIZE_E
INF_SIZEOF_ALGN = INF_CC_DEFALIGN
INF_SIZEOF_SHORT= INF_CC_SIZE_S
INF_SIZEOF_LONG = INF_CC_SIZE_L
INF_SIZEOF_LLONG= INF_CC_SIZE_LL
INF_SIZEOF_LDBL = INF_CC_SIZE_LDBL
INF_ABIBITS= 67 # uint32; ABI features
INF_APPCALL_OPTIONS= 68 # uint32; appcall options
# Maps each INF_* attribute code to its (getter, setter) pair in ida_ida.
# Alias codes (e.g. INF_MODEL, INF_CMTFLAG) deliberately map to the same
# accessor pair as their canonical code.
_INF_attrs_accessors = {
    INF_ABIBITS : (ida_ida.inf_get_abibits, ida_ida.inf_set_abibits),
    INF_AF : (ida_ida.inf_get_af, ida_ida.inf_set_af),
    INF_AF2 : (ida_ida.inf_get_af2, ida_ida.inf_set_af2),
    INF_APPCALL_OPTIONS : (ida_ida.inf_get_appcall_options, ida_ida.inf_set_appcall_options),
    INF_APPTYPE : (ida_ida.inf_get_apptype, ida_ida.inf_set_apptype),
    INF_ASMTYPE : (ida_ida.inf_get_asmtype, ida_ida.inf_set_asmtype),
    INF_BASEADDR : (ida_ida.inf_get_baseaddr, ida_ida.inf_set_baseaddr),
    INF_BIN_PREFIX_SIZE : (ida_ida.inf_get_bin_prefix_size, ida_ida.inf_set_bin_prefix_size),
    INF_CC_CM : (ida_ida.inf_get_cc_cm, ida_ida.inf_set_cc_cm),
    INF_CC_DEFALIGN : (ida_ida.inf_get_cc_defalign, ida_ida.inf_set_cc_defalign),
    INF_CC_ID : (ida_ida.inf_get_cc_id, ida_ida.inf_set_cc_id),
    INF_CC_SIZE_B : (ida_ida.inf_get_cc_size_b, ida_ida.inf_set_cc_size_b),
    INF_CC_SIZE_E : (ida_ida.inf_get_cc_size_e, ida_ida.inf_set_cc_size_e),
    INF_CC_SIZE_I : (ida_ida.inf_get_cc_size_i, ida_ida.inf_set_cc_size_i),
    INF_CC_SIZE_L : (ida_ida.inf_get_cc_size_l, ida_ida.inf_set_cc_size_l),
    INF_CC_SIZE_LDBL : (ida_ida.inf_get_cc_size_ldbl, ida_ida.inf_set_cc_size_ldbl),
    INF_CC_SIZE_LL : (ida_ida.inf_get_cc_size_ll, ida_ida.inf_set_cc_size_ll),
    INF_CC_SIZE_S : (ida_ida.inf_get_cc_size_s, ida_ida.inf_set_cc_size_s),
    INF_CMTFLAG : (ida_ida.inf_get_cmtflg, ida_ida.inf_set_cmtflg),
    INF_CMT_INDENT : (ida_ida.inf_get_cmt_indent, ida_ida.inf_set_cmt_indent),
    INF_DATABASE_CHANGE_COUNT : (ida_ida.inf_get_database_change_count, ida_ida.inf_set_database_change_count),
    INF_DATATYPES : (ida_ida.inf_get_datatypes, ida_ida.inf_set_datatypes),
    INF_DEMNAMES : (ida_ida.inf_get_demnames, ida_ida.inf_set_demnames),
    INF_END_PRIVRANGE : (ida_ida.inf_get_privrange_end_ea, ida_ida.inf_set_privrange_end_ea),
    INF_FILETYPE : (ida_ida.inf_get_filetype, ida_ida.inf_set_filetype),
    INF_GENFLAGS : (ida_ida.inf_get_genflags, ida_ida.inf_set_genflags),
    INF_HIGHOFF : (ida_ida.inf_get_highoff, ida_ida.inf_set_highoff),
    INF_INDENT : (ida_ida.inf_get_indent, ida_ida.inf_set_indent),
    INF_LENXREF : (ida_ida.inf_get_lenxref, ida_ida.inf_set_lenxref),
    INF_LFLAGS : (ida_ida.inf_get_lflags, ida_ida.inf_set_lflags),
    INF_LIMITER : (ida_ida.inf_get_limiter, ida_ida.inf_set_limiter),
    INF_LISTNAMES : (ida_ida.inf_get_listnames, ida_ida.inf_set_listnames),
    INF_LONG_DEMNAMES : (ida_ida.inf_get_long_demnames, ida_ida.inf_set_long_demnames),
    INF_LOWOFF : (ida_ida.inf_get_lowoff, ida_ida.inf_set_lowoff),
    INF_MAIN : (ida_ida.inf_get_main, ida_ida.inf_set_main),
    INF_MARGIN : (ida_ida.inf_get_margin, ida_ida.inf_set_margin),
    INF_MAXREF : (ida_ida.inf_get_maxref, ida_ida.inf_set_maxref),
    INF_MAX_AUTONAME_LEN : (ida_ida.inf_get_max_autoname_len, ida_ida.inf_set_max_autoname_len),
    INF_MAX_EA : (ida_ida.inf_get_max_ea, ida_ida.inf_set_max_ea),
    INF_MIN_EA : (ida_ida.inf_get_min_ea, ida_ida.inf_set_min_ea),
    INF_MODEL : (ida_ida.inf_get_cc_cm, ida_ida.inf_set_cc_cm),
    INF_NAMETYPE : (ida_ida.inf_get_nametype, ida_ida.inf_set_nametype),
    INF_NETDELTA : (ida_ida.inf_get_netdelta, ida_ida.inf_set_netdelta),
    INF_OMAX_EA : (ida_ida.inf_get_omax_ea, ida_ida.inf_set_omax_ea),
    INF_OMIN_EA : (ida_ida.inf_get_omin_ea, ida_ida.inf_set_omin_ea),
    INF_OSTYPE : (ida_ida.inf_get_ostype, ida_ida.inf_set_ostype),
    INF_OUTFLAGS : (ida_ida.inf_get_outflags, ida_ida.inf_set_outflags),
    INF_PREFFLAG : (ida_ida.inf_get_prefflag, ida_ida.inf_set_prefflag),
    INF_PRIVRANGE_END_EA : (ida_ida.inf_get_privrange_end_ea, ida_ida.inf_set_privrange_end_ea),
    INF_PRIVRANGE_START_EA : (ida_ida.inf_get_privrange_start_ea, ida_ida.inf_set_privrange_start_ea),
    INF_PROCNAME : (ida_ida.inf_get_procname, ida_ida.inf_set_procname),
    INF_REFCMTNUM : (ida_ida.inf_get_refcmtnum, ida_ida.inf_set_refcmtnum),
    INF_SHORT_DEMNAMES : (ida_ida.inf_get_short_demnames, ida_ida.inf_set_short_demnames),
    INF_SPECSEGS : (ida_ida.inf_get_specsegs, ida_ida.inf_set_specsegs),
    INF_START_CS : (ida_ida.inf_get_start_cs, ida_ida.inf_set_start_cs),
    INF_START_EA : (ida_ida.inf_get_start_ea, ida_ida.inf_set_start_ea),
    INF_START_IP : (ida_ida.inf_get_start_ip, ida_ida.inf_set_start_ip),
    INF_START_PRIVRANGE : (ida_ida.inf_get_privrange_start_ea, ida_ida.inf_set_privrange_start_ea),
    INF_START_SP : (ida_ida.inf_get_start_sp, ida_ida.inf_set_start_sp),
    INF_START_SS : (ida_ida.inf_get_start_ss, ida_ida.inf_set_start_ss),
    INF_STRLIT_BREAK : (ida_ida.inf_get_strlit_break, ida_ida.inf_set_strlit_break),
    INF_STRLIT_FLAGS : (ida_ida.inf_get_strlit_flags, ida_ida.inf_set_strlit_flags),
    INF_STRLIT_PREF : (ida_ida.inf_get_strlit_pref, ida_ida.inf_set_strlit_pref),
    INF_STRLIT_SERNUM : (ida_ida.inf_get_strlit_sernum, ida_ida.inf_set_strlit_sernum),
    INF_STRLIT_ZEROES : (ida_ida.inf_get_strlit_zeroes, ida_ida.inf_set_strlit_zeroes),
    INF_STRTYPE : (ida_ida.inf_get_strtype, ida_ida.inf_set_strtype),
    INF_TYPE_XREFNUM : (ida_ida.inf_get_type_xrefnum, ida_ida.inf_set_type_xrefnum),
    INF_VERSION : (ida_ida.inf_get_version, ida_ida.inf_set_version),
    INF_XREFFLAG : (ida_ida.inf_get_xrefflag, ida_ida.inf_set_xrefflag),
    INF_XREFNUM : (ida_ida.inf_get_xrefnum, ida_ida.inf_set_xrefnum),
}
def get_inf_attr(attr):
    """Read one INF_* database attribute.

    Looks up the (getter, setter) pair registered for `attr` and invokes
    the getter.  Raises KeyError for an unknown attribute code.
    """
    getter, _ = _INF_attrs_accessors[attr]
    return getter()
def set_inf_attr(attr, value):
    """Write one INF_* database attribute.

    Invokes the setter registered for `attr` with `value`.  Always returns 1
    (IDC compatibility).  Raises KeyError for an unknown attribute code.
    """
    _, setter = _INF_attrs_accessors[attr]
    setter(value)
    return 1
# Processor/assembler selection: thin re-exports from ida_idp.
set_processor_type = ida_idp.set_processor_type
SETPROC_IDB = ida_idp.SETPROC_IDB
SETPROC_LOADER = ida_idp.SETPROC_LOADER
SETPROC_LOADER_NON_FATAL = ida_idp.SETPROC_LOADER_NON_FATAL
SETPROC_USER = ida_idp.SETPROC_USER
def SetPrcsr(processor):
    """Legacy IDC alias: set the processor type as a user request."""
    return set_processor_type(processor, SETPROC_USER)
set_target_assembler = ida_idp.set_target_assembler
def batch(batch):
    """Enable or disable batch (non-interactive) mode.

    Returns the previous batch-mode value so callers can restore it.
    """
    previous = ida_kernwin.cvar.batch
    ida_kernwin.cvar.batch = batch
    return previous
#----------------------------------------------------------------------------
# I N T E R A C T I O N W I T H T H E U S E R
#----------------------------------------------------------------------------
def process_ui_action(name, flags=0):
    """Invoke an IDA UI action by its internal action name (see ida_kernwin)."""
    return ida_kernwin.process_ui_action(name, flags)
# User-interaction helpers re-exported from ida_kernwin.
ask_seg = ida_kernwin.ask_seg
ask_yn = ida_kernwin.ask_yn
msg = ida_kernwin.msg
warning = ida_kernwin.warning
error = ida_kernwin.error
# Indicator state codes for set_ida_state().
set_ida_state = ida_auto.set_ida_state
IDA_STATUS_READY = 0 # READY IDA is idle
IDA_STATUS_THINKING = 1 # THINKING Analyzing but the user may press keys
IDA_STATUS_WAITING = 2 # WAITING Waiting for the user input
IDA_STATUS_WORK = 3 # BUSY IDA is busy
refresh_idaview_anyway = ida_kernwin.refresh_idaview_anyway
refresh_lists = ida_kernwin.refresh_choosers
#----------------------------------------------------------------------------
# S E G M E N T A T I O N
#----------------------------------------------------------------------------
def sel2para(sel):
    """Resolve a selector to its base paragraph value.

    Uses SWIG pointer wrappers to receive the out-parameters of
    getn_selector().  If the selector is not found, the selector value
    itself is returned (selector == paragraph in that case).
    """
    s = ida_pro.sel_pointer()
    base = ida_pro.ea_pointer()
    res,tmp = ida_segment.getn_selector(sel, s.cast(), base.cast())
    if not res:
        return sel
    else:
        return base.value()
def find_selector(val):
    """Return the selector whose base equals val, truncated to 16 bits."""
    return ida_segment.find_selector(val) & 0xFFFF
set_selector = ida_segment.set_selector
del_selector = ida_segment.del_selector
def get_first_seg():
    """Return the start address of the first segment, or BADADDR if none."""
    first = ida_segment.get_first_seg()
    return first.start_ea if first else BADADDR
def get_next_seg(ea):
    """Return the start address of the first segment after ea, or BADADDR."""
    following = ida_segment.get_next_seg(ea)
    return following.start_ea if following else BADADDR
def get_segm_start(ea):
    """Return the start address of the segment containing ea, or BADADDR."""
    s = ida_segment.getseg(ea)
    return s.start_ea if s else BADADDR
def get_segm_end(ea):
    """Return the end address (exclusive) of the segment containing ea, or BADADDR."""
    s = ida_segment.getseg(ea)
    return s.end_ea if s else BADADDR
def get_segm_name(ea):
    """Return the name of the segment containing ea, or "" if there is no
    segment or the segment has no name."""
    s = ida_segment.getseg(ea)
    if not s:
        return ""
    return ida_segment.get_segm_name(s) or ""
def add_segm_ex(startea, endea, base, use32, align, comb, flags):
    """Create a segment [startea, endea) and register it with IDA.

    base   - paragraph base turned into a selector via setup_selector()
    use32  - bitness (0=16, 1=32, 2=64)
    align  - sa* alignment code; comb - sc* combination code
    flags  - ADDSEG_* flags forwarded to ida_segment.add_segm_ex()
    """
    seg = ida_segment.segment_t()
    seg.start_ea = startea
    seg.end_ea = endea
    seg.sel = ida_segment.setup_selector(base)
    seg.bitness = use32
    seg.align = align
    seg.comb = comb
    return ida_segment.add_segm_ex(seg, "", "", flags)
# ADDSEG_* flags for add_segm_ex() (re-exported from ida_segment).
ADDSEG_NOSREG = ida_segment.ADDSEG_NOSREG # set all default segment register values
# to BADSELs
# (undefine all default segment registers)
ADDSEG_OR_DIE = ida_segment. ADDSEG_OR_DIE # qexit() if can't add a segment
ADDSEG_NOTRUNC = ida_segment.ADDSEG_NOTRUNC
# of the next segment if they overlap.
# destroy/truncate old segments instead.
ADDSEG_QUIET = ida_segment.ADDSEG_QUIET # silent mode, no "Adding segment..." in the messages window
ADDSEG_FILLGAP = ida_segment.ADDSEG_FILLGAP # If there is a gap between the new segment
# and the previous one, and this gap is less
# than 64K, then fill the gap by extending the
# previous segment and adding .align directive
# to it. This way we avoid gaps between segments.
# Too many gaps lead to a virtual array failure.
# It cannot hold more than ~1000 gaps.
ADDSEG_SPARSE = ida_segment.ADDSEG_SPARSE # Use sparse storage method for the new segment
def AddSeg(startea, endea, base, use32, align, comb):
    """Legacy IDC alias: add_segm_ex() with ADDSEG_NOSREG."""
    return add_segm_ex(startea, endea, base, use32, align, comb, ADDSEG_NOSREG)
del_segm = ida_segment.del_segm
# SEGMOD_* flags used by the segment-modification helpers below.
SEGMOD_KILL = ida_segment.SEGMOD_KILL # disable addresses if segment gets
# shrinked or deleted
SEGMOD_KEEP = ida_segment.SEGMOD_KEEP # keep information (code & data, etc)
SEGMOD_SILENT = ida_segment.SEGMOD_SILENT # be silent
def set_segment_bounds(ea, startea, endea, flags):
    """Set both bounds of the segment at ea.

    Bitwise & (not `and`) is deliberate: both calls must execute even if
    the first one fails, and the combined result is returned.
    """
    return ida_segment.set_segm_start(ea, startea, flags) & \
           ida_segment.set_segm_end(ea, endea, flags)
def set_segm_name(ea, name):
    """Rename the segment at ea; returns False if no segment exists there."""
    seg = ida_segment.getseg(ea)
    if not seg:
        return False
    return ida_segment.set_segm_name(seg, name)
def set_segm_class(ea, segclass):
    """Set the class (e.g. CODE/DATA) of the segment at ea; False if no segment."""
    seg = ida_segment.getseg(ea)
    if not seg:
        return False
    return ida_segment.set_segm_class(seg, segclass)
def set_segm_alignment(ea, alignment):
    """Set the sa* alignment attribute of the segment at ea."""
    return set_segm_attr(ea, SEGATTR_ALIGN, alignment)
# Segment alignment (sa*) and combination (sc*) codes live either on the
# segment_t class or at ida_segment module level, depending on the SWIG build.
if ida_idaapi.uses_swig_builtins:
    _scope = ida_segment.segment_t
else:
    _scope = ida_segment
saAbs = _scope.saAbs # Absolute segment.
saRelByte = _scope.saRelByte # Relocatable, byte aligned.
saRelWord = _scope.saRelWord # Relocatable, word (2-byte, 16-bit) aligned.
saRelPara = _scope.saRelPara # Relocatable, paragraph (16-byte) aligned.
saRelPage = _scope.saRelPage # Relocatable, aligned on 256-byte boundary
# (a "page" in the original Intel specification).
saRelDble = _scope.saRelDble # Relocatable, aligned on a double word
# (4-byte) boundary. This value is used by
# the PharLap OMF for the same alignment.
saRel4K = _scope.saRel4K # This value is used by the PharLap OMF for
# page (4K) alignment. It is not supported
# by LINK.
saGroup = _scope.saGroup # Segment group
saRel32Bytes = _scope.saRel32Bytes # 32 bytes
saRel64Bytes = _scope.saRel64Bytes # 64 bytes
saRelQword = _scope.saRelQword # 8 bytes
def set_segm_combination(segea, comb):
    """Set the sc* combination attribute of the segment at segea."""
    return set_segm_attr(segea, SEGATTR_COMB, comb)
scPriv = _scope.scPriv # Private. Do not combine with any other program
# segment.
scPub = _scope.scPub # Public. Combine by appending at an offset that
# meets the alignment requirement.
scPub2 = _scope.scPub2 # As defined by Microsoft, same as C=2 (public).
scStack = _scope.scStack # Stack. Combine as for C=2. This combine type
# forces byte alignment.
scCommon = _scope.scCommon # Common. Combine by overlay using maximum size.
scPub3 = _scope.scPub3 # As defined by Microsoft, same as C=2 (public).
def set_segm_addressing(ea, bitness):
    """Set the bitness (0=16, 1=32, 2=64) of the segment at ea.

    Returns True on success, False if no segment exists at ea.
    """
    segment = ida_segment.getseg(ea)
    if not segment:
        return False
    segment.bitness = bitness
    return True
def selector_by_name(segname):
    """Return the selector of the segment named segname, or BADADDR."""
    found = ida_segment.get_segm_by_name(segname)
    return found.sel if found else BADADDR
def set_default_sreg_value(ea, reg, value):
    """Set the default value of segment register `reg` (by name) for the
    segment containing ea.  Returns False if the segment or register
    cannot be resolved."""
    seg = ida_segment.getseg(ea)
    regno = ida_idp.str2reg(reg)
    if not seg or regno < 0:
        return False
    return ida_segregs.set_default_sreg_value(seg, regno, value)
def set_segm_type(segea, segtype):
    """Set the SEG_* type of the segment at segea; returns the result of
    seg.update() on success, False if no segment exists there."""
    segment = ida_segment.getseg(segea)
    if not segment:
        return False
    segment.type = segtype
    return segment.update()
# Segment type codes (SEG_*) for set_segm_type()/SEGATTR_TYPE.
SEG_NORM = _scope.SEG_NORM
SEG_XTRN = _scope.SEG_XTRN # * segment with 'extern' definitions
# no instructions are allowed
SEG_CODE = _scope.SEG_CODE # pure code segment
SEG_DATA = _scope.SEG_DATA # pure data segment
SEG_IMP = _scope.SEG_IMP # implementation segment
SEG_GRP = _scope.SEG_GRP # * group of segments
# no instructions are allowed
SEG_NULL = _scope.SEG_NULL # zero-length segment
SEG_UNDF = _scope.SEG_UNDF # undefined segment type
SEG_BSS = _scope.SEG_BSS # uninitialized segment
SEG_ABSSYM = _scope.SEG_ABSSYM # * segment with definitions of absolute symbols
# no instructions are allowed
SEG_COMM = _scope.SEG_COMM # * segment with communal definitions
# no instructions are allowed
SEG_IMEM = _scope.SEG_IMEM # internal processor memory & sfr (8051)
def get_segm_attr(segea, attr):
    """Read a SEGATTR_* attribute of the segment at segea.

    Default segment registers (ES..GS) are stored separately and read via
    get_defsr(); everything else goes through the generic attribute map.
    Asserts (rather than returning an error) when no segment exists.
    """
    seg = ida_segment.getseg(segea)
    assert seg, "could not find segment at 0x%x" % segea
    if attr in [ SEGATTR_ES, SEGATTR_CS, SEGATTR_SS, SEGATTR_DS, SEGATTR_FS, SEGATTR_GS ]:
        return ida_segment.get_defsr(seg, _SEGATTRMAP[attr][1])
    else:
        return _IDC_GetAttr(seg, _SEGATTRMAP, attr)
def set_segm_attr(segea, attr, value):
    """Write a SEGATTR_* attribute of the segment at segea and commit it
    with seg.update().  Mirrors get_segm_attr()'s sreg special case."""
    seg = ida_segment.getseg(segea)
    assert seg, "could not find segment at 0x%x" % segea
    if attr in [ SEGATTR_ES, SEGATTR_CS, SEGATTR_SS, SEGATTR_DS, SEGATTR_FS, SEGATTR_GS ]:
        ida_segment.set_defsr(seg, _SEGATTRMAP[attr][1], value)
    else:
        _IDC_SetAttr(seg, _SEGATTRMAP, attr, value)
    return seg.update()
# SEGATTR_* codes are byte offsets into the native segment_t structure,
# so they differ between 32-bit and 64-bit IDA (see the __EA64__ block).
SEGATTR_START = 0 # starting address
SEGATTR_END = 4 # ending address
SEGATTR_ORGBASE = 16
SEGATTR_ALIGN = 20 # alignment
SEGATTR_COMB = 21 # combination
SEGATTR_PERM = 22 # permissions
SEGATTR_BITNESS = 23 # bitness (0: 16, 1: 32, 2: 64 bit segment)
# Note: modifying the attribute directly does
# not lead to the reanalysis of the segment.
# Using set_segm_addressing() is more correct.
SEGATTR_FLAGS = 24 # segment flags
SEGATTR_SEL = 28 # segment selector
SEGATTR_ES = 32 # default ES value
SEGATTR_CS = 36 # default CS value
SEGATTR_SS = 40 # default SS value
SEGATTR_DS = 44 # default DS value
SEGATTR_FS = 48 # default FS value
SEGATTR_GS = 52 # default GS value
SEGATTR_TYPE = 96 # segment type
SEGATTR_COLOR = 100 # segment color
# Redefining these for 64-bit
if __EA64__:
    SEGATTR_START = 0
    SEGATTR_END = 8
    SEGATTR_ORGBASE = 32
    SEGATTR_ALIGN = 40
    SEGATTR_COMB = 41
    SEGATTR_PERM = 42
    SEGATTR_BITNESS = 43
    SEGATTR_FLAGS = 44
    SEGATTR_SEL = 48
    SEGATTR_ES = 56
    SEGATTR_CS = 64
    SEGATTR_SS = 72
    SEGATTR_DS = 80
    SEGATTR_FS = 88
    SEGATTR_GS = 96
    SEGATTR_TYPE = 184
    SEGATTR_COLOR = 188
# Maps SEGATTR_* codes to (is_ea, attr) where attr is either a segment_t
# attribute name or, for the default-sreg entries, the sreg index.
_SEGATTRMAP = {
    SEGATTR_START : (True, 'start_ea'),
    SEGATTR_END : (True, 'end_ea'),
    SEGATTR_ORGBASE : (False, 'orgbase'),
    SEGATTR_ALIGN : (False, 'align'),
    SEGATTR_COMB : (False, 'comb'),
    SEGATTR_PERM : (False, 'perm'),
    SEGATTR_BITNESS : (False, 'bitness'),
    SEGATTR_FLAGS : (False, 'flags'),
    SEGATTR_SEL : (False, 'sel'),
    SEGATTR_ES : (False, 0),
    SEGATTR_CS : (False, 1),
    SEGATTR_SS : (False, 2),
    SEGATTR_DS : (False, 3),
    SEGATTR_FS : (False, 4),
    SEGATTR_GS : (False, 5),
    SEGATTR_TYPE : (False, 'type'),
    SEGATTR_COLOR : (False, 'color'),
}
# Valid segment flags (SEGATTR_FLAGS bit values)
SFL_COMORG = 0x01 # IDP dependent field (IBM PC: if set, ORG directive is not commented out)
SFL_OBOK = 0x02 # orgbase is present? (IDP dependent field)
SFL_HIDDEN = 0x04 # is the segment hidden?
SFL_DEBUG = 0x08 # is the segment created for the debugger?
SFL_LOADER = 0x10 # is the segment created by the loader?
SFL_HIDETYPE = 0x20 # hide segment type (do not print it in the listing)
def move_segm(ea, to, flags):
    """Move the segment at ea to address `to` (MSF_* flags).

    Returns a MOVE_SEGM_* code; MOVE_SEGM_PARAM if no segment exists at ea.
    """
    segment = ida_segment.getseg(ea)
    if not segment:
        return MOVE_SEGM_PARAM
    return ida_segment.move_segm(segment, to, flags)
# MSF_* flags for move_segm()/rebase_program().
MSF_SILENT = 0x0001 # don't display a "please wait" box on the screen
MSF_NOFIX = 0x0002 # don't call the loader to fix relocations
MSF_LDKEEP = 0x0004 # keep the loader in the memory (optimization)
MSF_FIXONCE = 0x0008 # valid for rebase_program(): call loader only once
# MOVE_SEGM_* result codes returned by move_segm().
MOVE_SEGM_OK = 0 # all ok
MOVE_SEGM_PARAM = -1 # The specified segment does not exist
MOVE_SEGM_ROOM = -2 # Not enough free room at the target address
MOVE_SEGM_IDP = -3 # IDP module forbids moving the segment
MOVE_SEGM_CHUNK = -4 # Too many chunks are defined, can't move
MOVE_SEGM_LOADER = -5 # the loader rejected the move
MOVE_SEGM_ODD = -6 # cannot move segments by an odd number of bytes
rebase_program = ida_segment.rebase_program
set_storage_type = ida_bytes.change_storage_type
# Storage types for set_storage_type().
STT_VA = 0 # regular storage: virtual arrays, an explicit flag for each byte
STT_MM = 1 # memory map: sparse storage. useful for huge objects
#----------------------------------------------------------------------------
# C R O S S R E F E R E N C E S
#----------------------------------------------------------------------------
# Flow types (combine with XREF_USER!):
fl_CF = 16 # Call Far
fl_CN = 17 # Call Near
fl_JF = 18 # jumpto Far
fl_JN = 19 # jumpto Near
fl_F = 21 # Ordinary flow
XREF_USER = 32 # All user-specified xref types
# must be combined with this bit
# Mark exec flow 'from' 'to'
add_cref = ida_xref.add_cref
del_cref = ida_xref.del_cref
# The following functions include the ordinary flows:
# (the ordinary flow references are returned first)
get_first_cref_from = ida_xref.get_first_cref_from
get_next_cref_from = ida_xref.get_next_cref_from
get_first_cref_to = ida_xref.get_first_cref_to
get_next_cref_to = ida_xref.get_next_cref_to
# The following functions don't take into account the ordinary flows:
get_first_fcref_from = ida_xref.get_first_fcref_from
get_next_fcref_from = ida_xref.get_next_fcref_from
get_first_fcref_to = ida_xref.get_first_fcref_to
get_next_fcref_to = ida_xref.get_next_fcref_to
# Data-reference types (dr_*) re-exported from ida_xref.
dr_O = ida_xref.dr_O
dr_W = ida_xref.dr_W
dr_R = ida_xref.dr_R
dr_T = ida_xref.dr_T
dr_I = ida_xref.dr_I
add_dref = ida_xref.add_dref
del_dref = ida_xref.del_dref
get_first_dref_from = ida_xref.get_first_dref_from
get_next_dref_from = ida_xref.get_next_dref_from
get_first_dref_to = ida_xref.get_first_dref_to
get_next_dref_to = ida_xref.get_next_dref_to
# The IDC file-I/O primitives below are deliberately unsupported in
# IDAPython; each stub raises to point callers at the Python replacement.
def get_xref_type():
    """Deprecated: unsupported here; use idautils.XrefsFrom()/XrefsTo()."""
    raise DeprecatedIDCError("use XrefsFrom() XrefsTo() from idautils instead.")
def fopen(f, mode):
    """Deprecated IDC stub; use Python file objects."""
    raise DeprecatedIDCError("fopen() deprecated. Use Python file objects instead.")
def fclose(handle):
    """Deprecated IDC stub; use Python file objects."""
    raise DeprecatedIDCError("fclose() deprecated. Use Python file objects instead.")
def filelength(handle):
    """Deprecated IDC stub; use Python file objects."""
    raise DeprecatedIDCError("filelength() deprecated. Use Python file objects instead.")
def fseek(handle, offset, origin):
    """Deprecated IDC stub; use Python file objects."""
    raise DeprecatedIDCError("fseek() deprecated. Use Python file objects instead.")
def ftell(handle):
    """Deprecated IDC stub; use Python file objects."""
    raise DeprecatedIDCError("ftell() deprecated. Use Python file objects instead.")
def LoadFile(filepath, pos, ea, size):
    """Load `size` bytes from file offset `pos` into the database at ea.

    Returns the result of file2base() on success, 0 if the file cannot
    be opened.
    """
    handle = ida_diskio.open_linput(filepath, False)
    if not handle:
        return 0
    result = ida_loader.file2base(handle, pos, ea, ea + size, False)
    ida_diskio.close_linput(handle)
    return result
# Lowercase alias kept for IDC compatibility.
def loadfile(filepath, pos, ea, size): return LoadFile(filepath, pos, ea, size)
def SaveFile(filepath, pos, ea, size):
    """Write `size` database bytes starting at ea into filepath at offset pos.

    Opens an existing file for modification (fopenM) or creates a new
    binary file (fopenWB).  Returns base2file()'s result, or 0 if the
    file cannot be opened.
    """
    if os.path.isfile(filepath):
        handle = ida_diskio.fopenM(filepath)
    else:
        handle = ida_diskio.fopenWB(filepath)
    if not handle:
        return 0
    result = ida_loader.base2file(handle, pos, ea, ea + size)
    ida_diskio.eclose(handle)
    return result
# Lowercase alias kept for IDC compatibility.
def savefile(filepath, pos, ea, size): return SaveFile(filepath, pos, ea, size)
# More deprecated IDC file-I/O stubs; use Python file objects instead.
def fgetc(handle):
    """Deprecated IDC stub; use Python file objects."""
    raise DeprecatedIDCError("fgetc() deprecated. Use Python file objects instead.")
def fputc(byte, handle):
    """Deprecated IDC stub; use Python file objects."""
    raise DeprecatedIDCError("fputc() deprecated. Use Python file objects instead.")
def fprintf(handle, format, *args):
    """Deprecated IDC stub; use Python file objects."""
    raise DeprecatedIDCError("fprintf() deprecated. Use Python file objects instead.")
def readshort(handle, mostfirst):
    """Deprecated IDC stub; use Python file objects."""
    raise DeprecatedIDCError("readshort() deprecated. Use Python file objects instead.")
def readlong(handle, mostfirst):
    """Deprecated IDC stub; use Python file objects."""
    raise DeprecatedIDCError("readlong() deprecated. Use Python file objects instead.")
def writeshort(handle, word, mostfirst):
    """Deprecated IDC stub; use Python file objects."""
    raise DeprecatedIDCError("writeshort() deprecated. Use Python file objects instead.")
def writelong(handle, dword, mostfirst):
    """Deprecated IDC stub; use Python file objects."""
    raise DeprecatedIDCError("writelong() deprecated. Use Python file objects instead.")
def readstr(handle):
    """Deprecated IDC stub; use Python file objects."""
    raise DeprecatedIDCError("readstr() deprecated. Use Python file objects instead.")
def writestr(handle, s):
    """Deprecated IDC stub; use Python file objects."""
    raise DeprecatedIDCError("writestr() deprecated. Use Python file objects instead.")
# Function creation/deletion re-exports from ida_funcs.
add_func = ida_funcs.add_func
del_func = ida_funcs.del_func
set_func_end = ida_funcs.set_func_end
def get_next_func(ea):
    """Return the start address of the next function after ea, or BADADDR."""
    func = ida_funcs.get_next_func(ea)
    if not func:
        return BADADDR
    else:
        return func.start_ea
def get_prev_func(ea):
    """Return the start address of the function preceding ea, or BADADDR."""
    func = ida_funcs.get_prev_func(ea)
    if not func:
        return BADADDR
    else:
        return func.start_ea
def get_func_attr(ea, attr):
    """Read a FUNCATTR_* attribute of the function at ea, or BADADDR."""
    func = ida_funcs.get_func(ea)
    return _IDC_GetAttr(func, _FUNCATTRMAP, attr) if func else BADADDR
def set_func_attr(ea, attr, value):
    """Write a FUNCATTR_* attribute of the function at ea and commit it.

    Returns update_func()'s result, or 0 if there is no function at ea.
    """
    func = ida_funcs.get_func(ea)
    if func:
        _IDC_SetAttr(func, _FUNCATTRMAP, attr, value)
        return ida_funcs.update_func(func)
    return 0
# FUNCATTR_* codes are byte offsets into the native func_t structure and
# therefore differ between 32-bit and 64-bit IDA (see the __EA64__ block).
# FUNCATTR_OWNER/FUNCATTR_REFQTY overlap FRAME/FRSIZE: they apply to
# function *tails* (see get_fchunk_attr), which reuse those fields.
FUNCATTR_START = 0
FUNCATTR_END = 4
FUNCATTR_FLAGS = 8
FUNCATTR_FRAME = 16
FUNCATTR_FRSIZE = 20
FUNCATTR_FRREGS = 24
FUNCATTR_ARGSIZE = 28
FUNCATTR_FPD = 32
FUNCATTR_COLOR = 36
FUNCATTR_OWNER = 16
FUNCATTR_REFQTY = 20
if __EA64__:
    FUNCATTR_START = 0
    FUNCATTR_END = 8
    FUNCATTR_FLAGS = 16
    FUNCATTR_FRAME = 24
    FUNCATTR_FRSIZE = 32
    FUNCATTR_FRREGS = 40
    FUNCATTR_ARGSIZE = 48
    FUNCATTR_FPD = 56
    FUNCATTR_COLOR = 64
    FUNCATTR_OWNER = 24
    FUNCATTR_REFQTY = 32
# Maps FUNCATTR_* codes to (is_ea, func_t attribute name).
_FUNCATTRMAP = {
    FUNCATTR_START : (True, 'start_ea'),
    FUNCATTR_END : (True, 'end_ea'),
    FUNCATTR_FLAGS : (False, 'flags'),
    FUNCATTR_FRAME : (True, 'frame'),
    FUNCATTR_FRSIZE : (True, 'frsize'),
    FUNCATTR_FRREGS : (True, 'frregs'),
    FUNCATTR_ARGSIZE : (True, 'argsize'),
    FUNCATTR_FPD : (False, 'fpd'),
    FUNCATTR_COLOR : (False, 'color'),
    FUNCATTR_OWNER : (True, 'owner'),
    FUNCATTR_REFQTY : (True, 'refqty')
}
def get_func_flags(ea):
    """Return the FUNC_* flags of the function at ea, or -1 if none."""
    func = ida_funcs.get_func(ea)
    return func.flags if func else -1
# FUNC_* flag bits live either on the func_t class or at ida_funcs module
# level, depending on the SWIG build.
if ida_idaapi.uses_swig_builtins:
    _scope = ida_funcs.func_t
else:
    _scope = ida_funcs
FUNC_NORET = _scope.FUNC_NORET # function doesn't return
FUNC_FAR = _scope.FUNC_FAR # far function
FUNC_LIB = _scope.FUNC_LIB # library function
FUNC_STATIC = _scope.FUNC_STATICDEF # static function
FUNC_FRAME = _scope.FUNC_FRAME # function uses frame pointer (BP)
FUNC_USERFAR = _scope.FUNC_USERFAR # user has specified far-ness
# of the function
FUNC_HIDDEN = _scope.FUNC_HIDDEN # a hidden function
FUNC_THUNK = _scope.FUNC_THUNK # thunk (jump) function
FUNC_BOTTOMBP = _scope.FUNC_BOTTOMBP # BP points to the bottom of the stack frame
FUNC_NORET_PENDING = _scope.FUNC_NORET_PENDING # Function 'non-return' analysis
# must be performed. This flag is
# verified upon func_does_return()
FUNC_SP_READY = _scope.FUNC_SP_READY # SP-analysis has been performed
# If this flag is on, the stack
# change points should not be not
# modified anymore. Currently this
# analysis is performed only for PC
FUNC_PURGED_OK = _scope.FUNC_PURGED_OK # 'argsize' field has been validated.
# If this bit is clear and 'argsize'
# is 0, then we do not known the real
# number of bytes removed from
# the stack. This bit is handled
# by the processor module.
FUNC_TAIL = _scope.FUNC_TAIL # This is a function tail.
# Other bits must be clear
# (except FUNC_HIDDEN)
def set_func_flags(ea, flags):
    """Replace the FUNC_* flags of the function at ea.

    Returns 1 on success, 0 if there is no function at ea.
    """
    func = ida_funcs.get_func(ea)
    if not func:
        return 0
    func.flags = flags
    ida_funcs.update_func(func)
    return 1
def get_func_name(ea):
    """Return the name of the function at ea, or "" if none."""
    return ida_funcs.get_func_name(ea) or ""
def get_func_cmt(ea, repeatable):
    """Return the (repeatable or regular) comment of the function at ea.

    Returns "" when there is no function at ea or it has no comment.
    """
    func = ida_funcs.get_func(ea)
    if not func:
        return ""
    return ida_funcs.get_func_cmt(func, repeatable) or ""
def set_func_cmt(ea, cmt, repeatable):
    """Set the (repeatable or regular) comment of the function at ea.

    Returns None when there is no function at ea.
    """
    func = ida_funcs.get_func(ea)
    if not func:
        return None
    else:
        return ida_funcs.set_func_cmt(func, cmt, repeatable)
def choose_func(title):
    """Let the user pick a function interactively; returns its start ea
    or BADADDR if the chooser was cancelled."""
    f = ida_kernwin.choose_func(title, ida_idaapi.BADADDR)
    return BADADDR if f is None else f.start_ea
def get_func_off_str(ea):
    """Return ea as a colorless "funcname+offset" string (requires that ea
    belongs to a function, per GNCN_REQFUNC)."""
    flags = ida_name.GNCN_NOCOLOR | ida_name.GNCN_REQFUNC
    return ida_name.get_nice_colored_name(ea, flags)
def find_func_end(ea):
    """Determine where a function starting at ea would end.

    Returns the computed end address, or BADADDR if the bounds cannot be
    determined.
    """
    func = ida_funcs.func_t(ea)
    res = ida_funcs.find_func_bounds(func, ida_funcs.FIND_FUNC_DEFINE)
    if res == ida_funcs.FIND_FUNC_UNDEF:
        return BADADDR
    else:
        return func.end_ea
def get_frame_id(ea):
    """Return the frame structure id of the function at ea, or None."""
    frame = ida_frame.get_frame(ea)
    return frame.id if frame else None
def get_frame_lvar_size(ea):
    """Return the local-variable area size of the frame at ea."""
    return get_func_attr(ea, FUNCATTR_FRSIZE)
def get_frame_regs_size(ea):
    """Return the saved-registers area size of the frame at ea."""
    return get_func_attr(ea, FUNCATTR_FRREGS)
def get_frame_args_size(ea):
    """Return the argument area size of the frame at ea."""
    return get_func_attr(ea, FUNCATTR_ARGSIZE)
def get_frame_size(ea):
    """Return the full stack frame size of the function at ea (0 if none)."""
    func = ida_funcs.get_func(ea)
    return ida_frame.get_frame_size(func) if func else 0
def set_frame_size(ea, lvsize, frregs, argsize):
    """Create or resize the stack frame of the function at ea.

    Tries add_frame() first; if that fails (presumably because a frame
    already exists -- TODO confirm), falls back to set_frame_size().
    Returns the frame id on success, -1 on failure.
    """
    func = ida_funcs.get_func(ea)
    if func is None:
        return -1
    frameid = ida_frame.add_frame(func, lvsize, frregs, argsize)
    if not frameid:
        if not ida_frame.set_frame_size(func, lvsize, frregs, argsize):
            return -1
    return func.frame
def get_spd(ea):
    """Return the cumulative SP delta at ea, or None if ea is not in a function."""
    func = ida_funcs.get_func(ea)
    if not func:
        return None
    return ida_frame.get_spd(func, ea)
def get_sp_delta(ea):
    """Return the SP delta of the instruction at ea, or None if not in a function."""
    func = ida_funcs.get_func(ea)
    if not func:
        return None
    return ida_frame.get_sp_delta(func, ea)
# ----------------------------------------------------------------------------
# S T A C K
# ----------------------------------------------------------------------------
def add_auto_stkpnt(func_ea, ea, delta):
    """Add an automatic SP change point at ea in the function at func_ea.

    Returns 0 if there is no function at func_ea.
    """
    pfn = ida_funcs.get_func(func_ea)
    if not pfn:
        return 0
    return ida_frame.add_auto_stkpnt(pfn, ea, delta)
add_user_stkpnt = ida_frame.add_user_stkpnt
def del_stkpnt(func_ea, ea):
    """Delete the SP change point at ea; 0 if there is no function at func_ea."""
    pfn = ida_funcs.get_func(func_ea)
    if not pfn:
        return 0
    return ida_frame.del_stkpnt(pfn, ea)
def get_min_spd_ea(func_ea):
    """Return the address where SP reaches its minimum, or BADADDR."""
    pfn = ida_funcs.get_func(func_ea)
    if not pfn:
        return BADADDR
    return ida_frame.get_min_spd_ea(pfn)
recalc_spd = ida_frame.recalc_spd
# ----------------------------------------------------------------------------
# E N T R Y P O I N T S
# ----------------------------------------------------------------------------
# Entry-point API re-exports from ida_entry.
get_entry_qty = ida_entry.get_entry_qty
add_entry = ida_entry.add_entry
get_entry_ordinal = ida_entry.get_entry_ordinal
get_entry = ida_entry.get_entry
get_entry_name = ida_entry.get_entry_name
rename_entry = ida_entry.rename_entry
# Fixup navigation re-exports from ida_fixup.
get_next_fixup_ea = ida_fixup.get_next_fixup_ea
get_prev_fixup_ea = ida_fixup.get_prev_fixup_ea
def get_fixup_target_type(ea):
    """Return the FIXUP_* type of the fixup at ea, or 0 if none exists."""
    fd = ida_fixup.fixup_data_t()
    if not fd.get(ea):
        return 0
    return fd.get_type()
# Fixup type codes (FIXUP_*).
FIXUP_OFF8 = 13 # 8-bit offset.
FIXUP_OFF16 = 1 # 16-bit offset.
FIXUP_SEG16 = 2 # 16-bit base--logical segment base (selector).
FIXUP_PTR32 = 3 # 32-bit long pointer (16-bit base:16-bit
# offset).
FIXUP_OFF32 = 4 # 32-bit offset.
FIXUP_PTR48 = 5 # 48-bit pointer (16-bit base:32-bit offset).
FIXUP_HI8 = 6 # high 8 bits of 16bit offset
FIXUP_HI16 = 7 # high 16 bits of 32bit offset
FIXUP_LOW8 = 8 # low 8 bits of 16bit offset
FIXUP_LOW16 = 9 # low 16 bits of 32bit offset
FIXUP_OFF64 = 12 # 64-bit offset
FIXUP_CUSTOM = 0x8000 # fixups with this bit are processed by
# processor module/plugin
def get_fixup_target_flags(ea):
    """Return the FIXUPF_* flags of the fixup at ea, or 0 if none exists."""
    fd = ida_fixup.fixup_data_t()
    if not fd.get(ea):
        return 0
    return fd.get_flags()
# Fixup flag bits (FIXUPF_*).
FIXUPF_REL = 0x1 # fixup is relative to the linear address
FIXUPF_EXTDEF = 0x2 # target is a location (otherwise - segment)
FIXUPF_UNUSED = 0x4 # fixup is ignored by IDA
FIXUPF_CREATED = 0x8 # fixup was not present in the input file
def get_fixup_target_sel(ea):
    """Return the target selector of the fixup at ea, or BADSEL."""
    fd = ida_fixup.fixup_data_t()
    if not fd.get(ea):
        return BADSEL
    return fd.sel
def get_fixup_target_off(ea):
    """Return the target offset of the fixup at ea, or BADADDR."""
    fd = ida_fixup.fixup_data_t()
    if not fd.get(ea):
        return BADADDR
    return fd.off
def get_fixup_target_dis(ea):
    """Return the displacement of the fixup at ea, or 0 if none exists."""
    fd = ida_fixup.fixup_data_t()
    if not fd.get(ea):
        return 0
    return fd.displacement
def set_fixup(ea, fixuptype, fixupflags, targetsel, targetoff, displ):
    """Create or overwrite the fixup at ea with the given type, flags,
    target selector/offset and displacement."""
    fd = ida_fixup.fixup_data_t(fixuptype, fixupflags)
    fd.sel = targetsel
    fd.off = targetoff
    fd.displacement = displ
    fd.set(ea)
del_fixup = ida_fixup.del_fixup
#----------------------------------------------------------------------------
#                   M A R K E D   P O S I T I O N S
#----------------------------------------------------------------------------
# Bookmark API re-exports from ida_idc.
put_bookmark = ida_idc.mark_position
get_bookmark = ida_idc.get_marked_pos
get_bookmark_desc = ida_idc.get_mark_comment
# ----------------------------------------------------------------------------
#                          S T R U C T U R E S
# ----------------------------------------------------------------------------
# Structure enumeration/lookup re-exports from ida_struct.
get_struc_qty = ida_struct.get_struc_qty
get_first_struc_idx = ida_struct.get_first_struc_idx
get_last_struc_idx = ida_struct.get_last_struc_idx
get_next_struc_idx = ida_struct.get_next_struc_idx
get_prev_struc_idx = ida_struct.get_prev_struc_idx
get_struc_idx = ida_struct.get_struc_idx
get_struc_by_idx = ida_struct.get_struc_by_idx
get_struc_id = ida_struct.get_struc_id
get_struc_name = ida_struct.get_struc_name
get_struc_cmt = ida_struct.get_struc_cmt
get_struc_size = ida_struct.get_struc_size
def get_member_qty(sid):
    """Return the number of members in structure sid, or -1 for a bad sid."""
    sptr = ida_struct.get_struc(sid)
    if not sptr:
        return -1
    return sptr.memqty
def get_member_id(sid, member_offset):
    """Return the id of the member at member_offset in structure sid, or -1."""
    sptr = ida_struct.get_struc(sid)
    if not sptr:
        return -1
    member = ida_struct.get_member(sptr, member_offset)
    return member.id if member else -1
def get_prev_offset(sid, offset):
    """Return the previous defined offset in structure sid, or -1."""
    s = ida_struct.get_struc(sid)
    if not s:
        return -1
    return ida_struct.get_struc_prev_offset(s, offset)
def get_next_offset(sid, offset):
    """Return the next defined offset in structure sid, or -1."""
    s = ida_struct.get_struc(sid)
    return -1 if not s else ida_struct.get_struc_next_offset(s, offset)
def get_first_member(sid):
    """Return the offset of the first member of structure sid, or -1."""
    s = ida_struct.get_struc(sid)
    if not s:
        return -1
    return ida_struct.get_struc_first_offset(s)
def get_last_member(sid):
    """Return the offset of the last member of structure sid, or -1."""
    s = ida_struct.get_struc(sid)
    if not s:
        return -1
    return ida_struct.get_struc_last_offset(s)
def get_member_offset(sid, member_name):
    """Return the offset of the member named member_name in sid, or -1."""
    s = ida_struct.get_struc(sid)
    if not s:
        return -1
    m = ida_struct.get_member_by_name(s, member_name)
    if not m:
        return -1
    return m.get_soff()
def get_member_name(sid, member_offset):
    """Return the name of the member at member_offset in structure sid,
    or None if the structure or member does not exist."""
    sptr = ida_struct.get_struc(sid)
    if not sptr:
        return None
    member = ida_struct.get_member(sptr, member_offset)
    if not member:
        return None
    return ida_struct.get_member_name(member.id)
def get_member_cmt(sid, member_offset, repeatable):
    """Return the (repeatable) comment of the member at member_offset, or None."""
    s = ida_struct.get_struc(sid)
    if not s:
        return None
    m = ida_struct.get_member(s, member_offset)
    if not m:
        return None
    return ida_struct.get_member_cmt(m.id, repeatable)
def get_member_size(sid, member_offset):
    """Return the byte size of the member at member_offset, or None."""
    s = ida_struct.get_struc(sid)
    if not s:
        return None
    m = ida_struct.get_member(s, member_offset)
    if not m:
        return None
    return ida_struct.get_member_size(m)
def get_member_flag(sid, member_offset):
    """Return the flags of the member at member_offset, or -1."""
    s = ida_struct.get_struc(sid)
    if not s:
        return -1
    m = ida_struct.get_member(s, member_offset)
    return -1 if not m else m.flag
def get_member_strid(sid, member_offset):
    """Return the id of the structure a member is an instance of, or -1
    if the member is not a nested structure."""
    s = ida_struct.get_struc(sid)
    if not s:
        return -1
    m = ida_struct.get_member(s, member_offset)
    if not m:
        return -1
    cs = ida_struct.get_sptr(m)
    if cs:
        return cs.id
    else:
        return -1
def is_union(sid):
    """Return nonzero if sid identifies a union, 0 otherwise (or bad sid)."""
    s = ida_struct.get_struc(sid)
    if not s:
        return 0
    return s.is_union()
def add_struc(index, name, is_union):
    """Create a new structure; an index of -1 means append at the end."""
    return ida_struct.add_struc(BADADDR if index == -1 else index, name, is_union)
def del_struc(sid):
    """Delete structure sid; returns 0 for a bad sid."""
    s = ida_struct.get_struc(sid)
    if not s:
        return 0
    return ida_struct.del_struc(s)
def set_struc_idx(sid, index):
    """Change the ordering index of structure sid; returns 0 for a bad sid."""
    s = ida_struct.get_struc(sid)
    if not s:
        return 0
    return ida_struct.set_struc_idx(s, index)
set_struc_name = ida_struct.set_struc_name
set_struc_cmt = ida_struct.set_struc_cmt
def add_struc_member(sid, name, offset, flag, typeid, nbytes, target=-1, tdelta=0, reftype=REF_OFF32):
    """Add a member to structure sid.

    Delegates to the IDC interpreter (eval_idc) rather than calling the
    SDK directly; for offset members (is_off0) the refinfo arguments are
    forwarded as well.  Returns 0 or a STRUC_ERROR_MEMBER_* code.
    """
    if is_off0(flag):
        return eval_idc('add_struc_member(%d, "%s", %d, %d, %d, %d, %d, %d, %d);' % (sid, ida_kernwin.str2user(name or ""), offset, flag, typeid, nbytes,
                        target, tdelta, reftype))
    else:
        return eval_idc('add_struc_member(%d, "%s", %d, %d, %d, %d);' % (sid, ida_kernwin.str2user(name or ""), offset, flag, typeid, nbytes))
# Error codes returned by add_struc_member()/set_member_type().
STRUC_ERROR_MEMBER_NAME = -1 # already has member with this name (bad name)
STRUC_ERROR_MEMBER_OFFSET = -2 # already has member at this offset
STRUC_ERROR_MEMBER_SIZE = -3 # bad number of bytes or bad sizeof(type)
STRUC_ERROR_MEMBER_TINFO = -4 # bad typeid parameter
STRUC_ERROR_MEMBER_STRUCT = -5 # bad struct id (the 1st argument)
STRUC_ERROR_MEMBER_UNIVAR = -6 # unions can't have variable sized members
STRUC_ERROR_MEMBER_VARLAST = -7 # variable sized member should be the last member in the structure
def del_struc_member(sid, member_offset):
    """Delete the member at member_offset from structure sid."""
    struc = ida_struct.get_struc(sid)
    return ida_struct.del_struc_member(struc, member_offset) if struc else 0
def set_member_name(sid, member_offset, name):
    """Rename the member at member_offset of structure sid."""
    struc = ida_struct.get_struc(sid)
    return ida_struct.set_member_name(struc, member_offset, name) if struc else 0
def set_member_type(sid, member_offset, flag, typeid, nitems, target=-1, tdelta=0, reftype=REF_OFF32):
    """Change the type of an existing structure member via the IDC built-in."""
    # Offset members (is_off0) carry three extra arguments.
    if is_off0(flag):
        return eval_idc('set_member_type(%d, %d, %d, %d, %d, %d, %d, %d);' % (sid, member_offset, flag, typeid, nitems,
                     target, tdelta, reftype))
    else:
        return eval_idc('set_member_type(%d, %d, %d, %d, %d);' % (sid, member_offset, flag, typeid, nitems))
def set_member_cmt(sid, member_offset, comment, repeatable):
    """Set the (repeatable) comment of the member at member_offset."""
    struc = ida_struct.get_struc(sid)
    if not struc:
        return 0
    member = ida_struct.get_member(struc, member_offset)
    return ida_struct.set_member_cmt(member, comment, repeatable) if member else 0
def expand_struc(sid, offset, delta, recalc):
    """Grow or shrink structure sid by delta bytes at the given offset."""
    struc = ida_struct.get_struc(sid)
    return ida_struct.expand_struc(struc, offset, delta, recalc) if struc else 0
def get_fchunk_attr(ea, attr):
    """Read an attribute of the function chunk at ea; BADADDR on failure."""
    chunk = ida_funcs.get_fchunk(ea)
    return BADADDR if not chunk else _IDC_GetAttr(chunk, _FUNCATTRMAP, attr)
def set_fchunk_attr(ea, attr, value):
    """Set a writable chunk attribute (start/end/owner only); 0 otherwise."""
    if attr not in (FUNCATTR_START, FUNCATTR_END, FUNCATTR_OWNER):
        return 0
    chunk = ida_funcs.get_fchunk(ea)
    if not chunk:
        return 0
    _IDC_SetAttr(chunk, _FUNCATTRMAP, attr, value)
    return ida_funcs.update_func(chunk)
# Direct re-export from ida_funcs.
get_fchunk_referer = ida_funcs.get_fchunk_referer
def get_next_fchunk(ea):
    """Start address of the next function chunk after ea, or BADADDR."""
    chunk = ida_funcs.get_next_fchunk(ea)
    return chunk.start_ea if chunk else BADADDR
def get_prev_fchunk(ea):
    """Start address of the previous function chunk before ea, or BADADDR."""
    chunk = ida_funcs.get_prev_fchunk(ea)
    return chunk.start_ea if chunk else BADADDR
def append_func_tail(funcea, ea1, ea2):
    """Attach [ea1, ea2) as a tail chunk of the function at funcea."""
    func = ida_funcs.get_func(funcea)
    return ida_funcs.append_func_tail(func, ea1, ea2) if func else 0
def remove_fchunk(funcea, tailea):
    """Detach the tail chunk at tailea from the function at funcea."""
    func = ida_funcs.get_func(funcea)
    return ida_funcs.remove_func_tail(func, tailea) if func else 0
def set_tail_owner(tailea, funcea):
    """Change the owning function of the tail chunk at tailea."""
    tail = ida_funcs.get_fchunk(tailea)
    return ida_funcs.set_tail_owner(tail, funcea) if tail else False
def first_func_chunk(funcea):
    """Start address of the first (entry) chunk of the function at funcea,
    or BADADDR when funcea is not inside a function."""
    func = ida_funcs.get_func(funcea)
    fci = ida_funcs.func_tail_iterator_t(func, funcea)
    return fci.chunk().start_ea if fci.main() else BADADDR
def next_func_chunk(funcea, tailea):
    """Get the chunk following the one that contains tailea.

    Returns the start address of the next chunk of the function at funcea,
    or BADADDR when tailea is not inside any chunk or its chunk is the last.
    """
    func = ida_funcs.get_func(funcea)
    fci = ida_funcs.func_tail_iterator_t(func, funcea)
    if not fci.main():
        return BADADDR
    # Walk the chunks until we find the one containing tailea.
    found = False
    while True:
        if fci.chunk().start_ea <= tailea and \
           fci.chunk().end_ea > tailea:
            found = True
            break
        if not next(fci):
            break
    # Advance once more to reach the following chunk, if there is one.
    if found and next(fci):
        return fci.chunk().start_ea
    else:
        return BADADDR
# Enumeration queries, re-exported from ida_enum.
get_enum_qty = ida_enum.get_enum_qty
getn_enum = ida_enum.getn_enum
get_enum_idx = ida_enum.get_enum_idx
get_enum = ida_enum.get_enum
get_enum_name = ida_enum.get_enum_name
get_enum_cmt = ida_enum.get_enum_cmt
get_enum_size = ida_enum.get_enum_size
get_enum_width = ida_enum.get_enum_width
get_enum_flag = ida_enum.get_enum_flag
get_enum_member_by_name = ida_enum.get_enum_member_by_name
get_enum_member_value = ida_enum.get_enum_member_value
get_enum_member_bmask = ida_enum.get_enum_member_bmask
get_enum_member_enum = ida_enum.get_enum_member_enum
def get_enum_member(enum_id, value, serial, bmask):
    """Id of the enum member with the given value, serial and bitmask."""
    # IDC numbers are signed; clamp negative masks to the address width.
    mask = bmask & BADADDR if bmask < 0 else bmask
    return ida_enum.get_enum_member(enum_id, value, serial, mask)
# Bitmask enumeration of a bitfield enum, re-exported from ida_enum.
get_first_bmask = ida_enum.get_first_bmask
get_last_bmask = ida_enum.get_last_bmask
get_next_bmask = ida_enum.get_next_bmask
get_prev_bmask = ida_enum.get_prev_bmask
def get_bmask_name(enum_id, bmask):
    """Name of the given bitmask of a bitfield enum."""
    mask = bmask & BADADDR if bmask < 0 else bmask
    return ida_enum.get_bmask_name(enum_id, mask)
def get_bmask_cmt(enum_id, bmask, repeatable):
    """(Repeatable) comment of the given bitmask."""
    mask = bmask & BADADDR if bmask < 0 else bmask
    return ida_enum.get_bmask_cmt(enum_id, mask, repeatable)
def set_bmask_name(enum_id, bmask, name):
    """Rename the given bitmask."""
    mask = bmask & BADADDR if bmask < 0 else bmask
    return ida_enum.set_bmask_name(enum_id, mask, name)
def set_bmask_cmt(enum_id, bmask, cmt, repeatable):
    """Set the (repeatable) comment of the given bitmask."""
    mask = bmask & BADADDR if bmask < 0 else bmask
    return ida_enum.set_bmask_cmt(enum_id, mask, cmt, repeatable)
def get_first_enum_member(enum_id, bmask):
    """Smallest value of the enum members carrying the given bitmask."""
    mask = bmask & BADADDR if bmask < 0 else bmask
    return ida_enum.get_first_enum_member(enum_id, mask)
def get_last_enum_member(enum_id, bmask):
    """Largest value of the enum members carrying the given bitmask."""
    mask = bmask & BADADDR if bmask < 0 else bmask
    return ida_enum.get_last_enum_member(enum_id, mask)
def get_next_enum_member(enum_id, value, bmask):
    """Next member value after `value` within the given bitmask."""
    mask = bmask & BADADDR if bmask < 0 else bmask
    return ida_enum.get_next_enum_member(enum_id, value, mask)
def get_prev_enum_member(enum_id, value, bmask):
    """Previous member value before `value` within the given bitmask."""
    mask = bmask & BADADDR if bmask < 0 else bmask
    return ida_enum.get_prev_enum_member(enum_id, value, mask)
def get_enum_member_name(const_id):
    """Name of the enum member const_id, or "" when it is invalid."""
    return ida_enum.get_enum_member_name(const_id) or ""
def get_enum_member_cmt(const_id, repeatable):
    """(Repeatable) comment of the enum member const_id, or ""."""
    return ida_enum.get_enum_member_cmt(const_id, repeatable) or ""
def add_enum(idx, name, flag):
    """Create a new enum; negative idx is clamped to SIZE_MAX."""
    if idx < 0:
        idx &= SIZE_MAX
    return ida_enum.add_enum(idx, name, flag)
# Enum maintenance, re-exported from ida_enum.
del_enum = ida_enum.del_enum
set_enum_idx = ida_enum.set_enum_idx
set_enum_name = ida_enum.set_enum_name
set_enum_cmt = ida_enum.set_enum_cmt
set_enum_flag = ida_enum.set_enum_flag
set_enum_bf = ida_enum.set_enum_bf
set_enum_width = ida_enum.set_enum_width
is_bf = ida_enum.is_bf
def add_enum_member(enum_id, name, value, bmask):
    """Add a member to the enum; negative bmask is clamped to address width.

    Returns 0 on success or one of the ENUM_MEMBER_ERROR_... codes below.
    """
    if bmask < 0:
        bmask &= BADADDR
    return ida_enum.add_enum_member(enum_id, name, value, bmask)
# Error codes of add_enum_member(), re-exported from ida_enum.
ENUM_MEMBER_ERROR_NAME = ida_enum.ENUM_MEMBER_ERROR_NAME
ENUM_MEMBER_ERROR_VALUE = ida_enum.ENUM_MEMBER_ERROR_VALUE
ENUM_MEMBER_ERROR_ENUM = ida_enum.ENUM_MEMBER_ERROR_ENUM
ENUM_MEMBER_ERROR_MASK = ida_enum.ENUM_MEMBER_ERROR_MASK
ENUM_MEMBER_ERROR_ILLV = ida_enum.ENUM_MEMBER_ERROR_ILLV
def del_enum_member(enum_id, value, serial, bmask):
    """Delete a member of the enum; negative bmask is clamped as above."""
    if bmask < 0:
        bmask &= BADADDR
    return ida_enum.del_enum_member(enum_id, value, serial, bmask)
set_enum_member_name = ida_enum.set_enum_member_name
set_enum_member_cmt = ida_enum.set_enum_member_cmt
# Netnode name prefix under which IDC arrays are persisted in the database.
_IDC_ARRAY_PREFIX = "$ idc_array "
def __l2m1(v):
    """Map BADNODE to -1, the IDC convention for "no such index"."""
    if v == ida_netnode.BADNODE:
        return -1
    else:
        return v
# Element storage tags: integer elements vs. string elements.
AR_LONG = ida_netnode.atag
AR_STR = ida_netnode.stag
class __dummy_netnode(object):
    """Null-object stand-in for a netnode: every operation fails benignly.

    __GetArrayById() returns the shared instance for invalid array ids so
    that callers may chain netnode calls without checking for None.
    """
    def rename(self, *args): return 0
    def kill(self, *args): pass
    def index(self, *args): return -1
    def altset(self, *args): return 0
    def supset(self, *args): return 0
    def altval(self, *args): return 0
    def supval(self, *args): return 0
    def altdel(self, *args): return 0
    def supdel(self, *args): return 0
    def altfirst(self, *args): return -1
    def supfirst(self, *args): return -1
    def altlast(self, *args): return -1
    def suplast(self, *args): return -1
    def altnext(self, *args): return -1
    def supnext(self, *args): return -1
    def altprev(self, *args): return -1
    def supprev(self, *args): return -1
    def hashset(self, *args): return 0
    def hashval(self, *args): return 0
    def hashstr(self, *args): return 0
    def hashstr_buf(self, *args): return 0
    def hashset_idx(self, *args): return 0
    def hashset_buf(self, *args): return 0
    def hashval_long(self, *args): return 0
    def hashdel(self, *args): return 0
    def hashfirst(self, *args): return 0
    def hashnext(self, *args): return 0
    def hashprev(self, *args): return 0
    def hashlast(self, *args): return 0
__dummy_netnode.instance = __dummy_netnode()  # shared singleton; the class is stateless
def __GetArrayById(array_id):
    """Resolve an IDC-array id to its netnode, or the no-op dummy node."""
    try:
        node = ida_netnode.netnode(array_id)
        name = node.get_name()
        # Only nodes created by create_array() carry the IDC-array prefix.
        if name is not None and name.startswith(_IDC_ARRAY_PREFIX):
            return node
    except (TypeError, NotImplementedError):
        pass
    return __dummy_netnode.instance
def create_array(name):
    """Create a persistent IDC array; return its id, or -1 on failure."""
    node = ida_netnode.netnode()
    if not node.create(_IDC_ARRAY_PREFIX + name):
        return -1
    return node.index()
def get_array_id(name):
    """Look up an existing IDC array by name; -1 when it does not exist."""
    node = ida_netnode.netnode(_IDC_ARRAY_PREFIX + name, 0, False)
    return __l2m1(node.index())
def rename_array(array_id, newname):
    """Rename an IDC array; True on success."""
    node = __GetArrayById(array_id)
    return node.rename(_IDC_ARRAY_PREFIX + newname) == 1
def delete_array(array_id):
    """Delete an IDC array together with all of its elements."""
    __GetArrayById(array_id).kill()
def set_array_long(array_id, idx, value):
    """Store an integer element at index idx."""
    return __GetArrayById(array_id).altset(idx, value)
def set_array_string(array_id, idx, value):
    """Store a string element at index idx."""
    return __GetArrayById(array_id).supset(idx, value)
def get_array_element(tag, array_id, idx):
    """Fetch element idx; tag selects the AR_LONG or AR_STR storage."""
    node = __GetArrayById(array_id)
    if tag == AR_LONG:
        return node.altval(idx, tag)
    if tag == AR_STR:
        value = node.supval(idx, tag)
        return value if value is not None else 0
    return 0
def del_array_element(tag, array_id, idx):
    """Delete element idx from the AR_LONG or AR_STR storage."""
    node = __GetArrayById(array_id)
    if tag == AR_LONG:
        return node.altdel(idx, tag)
    if tag == AR_STR:
        return node.supdel(idx, tag)
    return 0
def get_first_index(tag, array_id):
    """Lowest used index in the selected storage, or -1 when empty."""
    node = __GetArrayById(array_id)
    if tag == AR_LONG:
        return __l2m1(node.altfirst(tag))
    if tag == AR_STR:
        return __l2m1(node.supfirst(tag))
    return -1
def get_last_index(tag, array_id):
    """Highest used index in the selected storage, or -1 when empty."""
    node = __GetArrayById(array_id)
    if tag == AR_LONG:
        return __l2m1(node.altlast(tag))
    if tag == AR_STR:
        return __l2m1(node.suplast(tag))
    return -1
def get_next_index(tag, array_id, idx):
    """First used index after idx in the selected storage, or -1."""
    node = __GetArrayById(array_id)
    try:
        if tag == AR_LONG:
            return __l2m1(node.altnext(idx, tag))
        if tag == AR_STR:
            return __l2m1(node.supnext(idx, tag))
    except OverflowError:
        pass  # idx outside the netnode index range
    return -1
def get_prev_index(tag, array_id, idx):
    """Last used index before idx in the selected storage, or -1."""
    node = __GetArrayById(array_id)
    try:
        if tag == AR_LONG:
            return __l2m1(node.altprev(idx, tag))
        if tag == AR_STR:
            return __l2m1(node.supprev(idx, tag))
    except OverflowError:
        pass
    return -1
def set_hash_long(hash_id, key, value):
    """Store an integer under key in the hash."""
    return __GetArrayById(hash_id).hashset_idx(key, value)
def get_hash_long(hash_id, key):
    """Integer stored under key in the hash."""
    return __GetArrayById(hash_id).hashval_long(key)
def set_hash_string(hash_id, key, value):
    """Store a string under key in the hash."""
    return __GetArrayById(hash_id).hashset_buf(key, value)
def get_hash_string(hash_id, key):
    """String stored under key in the hash."""
    return __GetArrayById(hash_id).hashstr_buf(key)
def del_hash_string(hash_id, key):
    """Delete the hash element stored under key."""
    return __GetArrayById(hash_id).hashdel(key)
def get_first_hash_key(hash_id):
    """First key of the hash, or 0 when the hash is empty."""
    key = __GetArrayById(hash_id).hashfirst()
    return key if key is not None else 0
def get_last_hash_key(hash_id):
    """Last key of the hash, or 0 when the hash is empty."""
    key = __GetArrayById(hash_id).hashlast()
    return key if key is not None else 0
def get_next_hash_key(hash_id, key):
    """Key following `key`, or 0 at the end of the hash."""
    following = __GetArrayById(hash_id).hashnext(key)
    return following if following is not None else 0
def get_prev_hash_key(hash_id, key):
    """Key preceding `key`, or 0 at the start of the hash."""
    preceding = __GetArrayById(hash_id).hashprev(key)
    return preceding if preceding is not None else 0
# Source file / line number mapping, re-exported from ida_lines and ida_nalt.
add_sourcefile = ida_lines.add_sourcefile
get_sourcefile = ida_lines.get_sourcefile
del_sourcefile = ida_lines.del_sourcefile
set_source_linnum = ida_nalt.set_source_linnum
get_source_linnum = ida_nalt.get_source_linnum
del_source_linnum = ida_nalt.del_source_linnum
def add_default_til(name):
    """Load a type library into the database; 1 on success, 0 on failure."""
    return 1 if ida_typeinf.add_til(name, ida_typeinf.ADDTIL_DEFAULT) else 0
def import_type(idx, type_name):
    """Copy a named type from a loaded type library into the local types."""
    return ida_typeinf.import_type(None, idx, type_name)
def get_type(ea):
    """Printed type of the item at ea."""
    return ida_typeinf.idc_get_type(ea)
def SizeOf(typestr):
    """sizeof() of the given type string, in bytes."""
    return ida_typeinf.calc_type_size(None, typestr)
def get_tinfo(ea):
    """Raw (serialized) type information of the item at ea."""
    return ida_typeinf.idc_get_type_raw(ea)
def get_local_tinfo(ordinal):
    """Raw (serialized) local type with the given ordinal."""
    return ida_typeinf.idc_get_local_type_raw(ordinal)
def guess_type(ea):
    """Type guessed for the item at ea."""
    return ida_typeinf.idc_guess_type(ea)
# How strongly apply_type() marks the applied type:
TINFO_GUESSED = 0x0000 # this is a guessed type
TINFO_DEFINITE = 0x0001 # this is a definite type
TINFO_DELAYFUNC = 0x0002 # if the type belongs to a function, delay its application
def apply_type(ea, py_type, flags = TINFO_DEFINITE):
    """Apply serialized type information to the item at ea.

    py_type may be None / "" (empty type), a (type, fields) pair, or a
    3-tuple whose first element is dropped.
    """
    if py_type is None:
        py_type = ""
    if isinstance(py_type, ida_idaapi.string_types) and len(py_type) == 0:
        serialized = (b"", b"")
    elif len(py_type) == 3:
        serialized = py_type[1:]  # drop the leading element of the triple
    else:
        serialized = py_type
    return ida_typeinf.apply_type(None, serialized[0], serialized[1], ea, flags)
# Flags for parse_decl()/parse_decls(), re-exported from ida_typeinf.
PT_SIL = ida_typeinf.PT_SIL # silent: suppress warning messages
PT_NDC = ida_typeinf.PT_NDC
PT_TYP = ida_typeinf.PT_TYP # return declared type information
PT_VAR = ida_typeinf.PT_VAR # return declared object information
PT_PACKMASK = ida_typeinf.PT_PACKMASK # mask for pack alignment values
PT_HIGH = ida_typeinf.PT_HIGH # assume high level prototypes (with hidden args, etc)
PT_LOWER = ida_typeinf.PT_LOWER # lower the function prototypes
PT_REPLACE = ida_typeinf.PT_REPLACE # replace the old type (used in idc)
PT_RAWARGS = ida_typeinf.PT_RAWARGS # leave argument names unchanged (do not remove underscores)
PT_SILENT = PT_SIL # alias
PT_PAKDEF = 0x0000 # default pack value
PT_PAK1 = 0x0010 # #pragma pack(1)
PT_PAK2 = 0x0020 # #pragma pack(2)
PT_PAK4 = 0x0030 # #pragma pack(4)
PT_PAK8 = 0x0040 # #pragma pack(8)
PT_PAK16 = 0x0050 # #pragma pack(16)
# idc.py-specific
PT_FILE = 0x00010000 # input if a file name (otherwise contains type declarations)
def SetType(ea, newtype):
    """Apply a C declaration to ea; an empty newtype deletes the type."""
    if newtype == '':
        parsed = None  # request type deletion
    else:
        parsed = parse_decl(newtype, PT_SIL)
        if parsed is None:
            # the declaration did not parse
            return None
    return apply_type(ea, parsed, TINFO_DEFINITE)
def parse_decl(inputtype, flags):
    """Parse one C declaration; a missing trailing ';' is appended."""
    if inputtype and not inputtype.endswith(';'):
        inputtype += ';'
    return ida_typeinf.idc_parse_decl(None, inputtype, flags)
def parse_decls(inputtype, flags = 0):
    """Parse many C declarations at once."""
    return ida_typeinf.idc_parse_types(inputtype, flags)
def print_decls(ordinals, flags):
    """Print local types in a format suitable for a header file.

    @param ordinals: comma-separated string of local-type ordinals
    @param flags: combination of the PDF_... constants below
    @return: the generated declarations as one string
    """
    class def_sink(ida_typeinf.text_sink_t):
        """Accumulates the printed declarations into a string."""
        def __init__(self):
            ida_typeinf.text_sink_t.__init__(self)
            self.text = ""
        def _print(self, defstr):
            self.text += defstr
            return 0
    sink = def_sink()
    # Idiomatic comprehension instead of list(map(lambda l: int(l), ...)).
    py_ordinals = [int(ordinal) for ordinal in ordinals.split(",")]
    ida_typeinf.print_decls(sink, None, py_ordinals, flags)
    return sink.text
# Flags for print_decls():
PDF_INCL_DEPS = 0x1 # include dependencies
PDF_DEF_FWD = 0x2 # allow forward declarations
PDF_DEF_BASE = 0x4 # include base types: __int8, __int16, etc..
PDF_HEADER_CMT = 0x8 # prepend output with a descriptive comment
def get_ordinal_qty():
    """Number of allocated ordinals in the local type library."""
    return ida_typeinf.get_ordinal_qty(None)
def set_local_type(ordinal, input, flags):
    """Parse one declaration and store it as local type `ordinal`."""
    return ida_typeinf.idc_set_local_type(ordinal, input, flags)
def GetLocalType(ordinal, flags):
    """Printed form of the local type with the given ordinal, or ""."""
    type_, fields = get_local_tinfo(ordinal)
    if not type_:
        return ""
    name = get_numbered_type_name(ordinal)
    return ida_typeinf.idc_print_type(type_, fields, name, flags)
# Flags for idc_print_type() / GetLocalType():
PRTYPE_1LINE = 0x0000 # print to one line
PRTYPE_MULTI = 0x0001 # print to many lines
PRTYPE_TYPE = 0x0002 # print type declaration (not variable declaration)
PRTYPE_PRAGMA = 0x0004 # print pragmas for alignment
def get_numbered_type_name(ordinal):
    """Name of the local type with the given ordinal."""
    return ida_typeinf.idc_get_local_type_name(ordinal)
# ----------------------------------------------------------------------------
# H I D D E N A R E A S
# ----------------------------------------------------------------------------
add_hidden_range = ida_bytes.add_hidden_range
def update_hidden_range(ea, visible):
    """Set the visibility of the hidden range containing ea; 0 if none."""
    hidden = ida_bytes.get_hidden_range(ea)
    if not hidden:
        return 0
    hidden.visible = visible
    return ida_bytes.update_hidden_range(hidden)
del_hidden_range = ida_bytes.del_hidden_range
#--------------------------------------------------------------------------
# D E B U G G E R I N T E R F A C E
#--------------------------------------------------------------------------
# Process / thread control, re-exported from ida_dbg.
load_debugger = ida_dbg.load_debugger
start_process = ida_dbg.start_process
exit_process = ida_dbg.exit_process
suspend_process = ida_dbg.suspend_process
get_processes = ida_dbg.get_processes
attach_process = ida_dbg.attach_process
detach_process = ida_dbg.detach_process
get_thread_qty = ida_dbg.get_thread_qty
getn_thread = ida_dbg.getn_thread
get_current_thread = ida_dbg.get_current_thread
getn_thread_name = ida_dbg.getn_thread_name
select_thread = ida_dbg.select_thread
suspend_thread = ida_dbg.suspend_thread
resume_thread = ida_dbg.resume_thread
def _get_modules():
    """Yield each loaded module of the debugged process.

    NOTE(review): the same modinfo_t object is yielded every time and
    mutated in place by get_next_module — consume it before advancing.
    """
    mod = ida_idd.modinfo_t()
    ok = ida_dbg.get_first_module(mod)
    while ok:
        yield mod
        ok = ida_dbg.get_next_module(mod)
def get_first_module():
    """Base address of the first loaded module, or None if there is none."""
    # The original used ``for/else`` with no ``break``; since the loop body
    # returns, the ``else`` only ever ran for an empty iterator.  A plain
    # fall-through return expresses the same behavior without the trap.
    for module in _get_modules():
        return module.base
    return None
def get_next_module(base):
    """Base address of the module that follows the one based at `base`,
    or None when `base` is unknown or is the last module."""
    foundit = False
    for module in _get_modules():
        if foundit:
            return module.base
        if module.base == base:
            foundit = True
    return None
def get_module_name(base):
    """Name of the module based at `base`, or 0 when it is not found."""
    # Same ``for/else`` cleanup as get_first_module/get_next_module: the
    # loop never breaks, so the ``else`` was just a confusing fall-through.
    for module in _get_modules():
        if module.base == base:
            return module.name
    return 0
def get_module_size(base):
    """Size of the module based at `base`, or -1 when it is not found."""
    for module in _get_modules():
        if module.base == base:
            return module.size
    return -1
# Stepping primitives, re-exported from ida_dbg.
step_into = ida_dbg.step_into
step_over = ida_dbg.step_over
run_to = ida_dbg.run_to
step_until_ret = ida_dbg.step_until_ret
wait_for_next_event = ida_dbg.wait_for_next_event
def resume_process():
    """Continue execution without blocking (WFNE_CONT|WFNE_NOWAIT)."""
    return wait_for_next_event(WFNE_CONT|WFNE_NOWAIT, 0)
def send_dbg_command(cmd):
    """Send a command to the debugger backend and return its textual reply."""
    reply = eval_idc('send_dbg_command("%s");' % ida_kernwin.str2user(cmd))
    if reply.startswith("IDC_FAILURE"):
        raise Exception("Debugger command is available only when the debugger is active!")
    return reply
# wfne flag is combination of the following:
WFNE_ANY = 0x0001 # return the first event (even if it doesn't suspend the process)
WFNE_SUSP = 0x0002 # wait until the process gets suspended
WFNE_SILENT = 0x0004 # be silent (no modal boxes)
WFNE_CONT = 0x0008 # continue from the suspended state
WFNE_NOWAIT = 0x0010 # do not wait for any event, return immediately
# Return values of wait_for_next_event():
NOTASK = -2 # no process is being debugged
DBG_ERROR = -1 # error (e.g. network error)
DBG_TIMEOUT = 0 # timeout
# Debug event codes:
PROCESS_STARTED = 0x00000001 # new process started
PROCESS_EXITED = 0x00000002 # process stopped
THREAD_STARTED = 0x00000004 # new thread started
THREAD_EXITED = 0x00000008 # thread stopped
BREAKPOINT = 0x00000010 # breakpoint reached
STEP = 0x00000020 # one instruction executed
EXCEPTION = 0x00000040 # exception occurred
LIB_LOADED = 0x00000080 # new library loaded
LIB_UNLOADED = 0x00000100 # library unloaded
INFORMATION = 0x00000200 # user-defined information
PROCESS_ATTACHED = 0x00000400 # attached to a running process
PROCESS_DETACHED = 0x00000800 # detached from the process
PROCESS_SUSPENDED = 0x00001000 # process has been suspended
refresh_debugger_memory = ida_dbg.refresh_debugger_memory
take_memory_snapshot = ida_segment.take_memory_snapshot
get_process_state = ida_dbg.get_process_state
# Values returned by get_process_state():
DSTATE_SUSP = -1 # process is suspended
DSTATE_NOTASK = 0 # no process is currently debugged
DSTATE_RUN = 1 # process is running
DSTATE_RUN_WAIT_ATTACH = 2 # process is running, waiting for attach
DSTATE_RUN_WAIT_END = 3 # process is running, waiting for it to end
def _cur_debug_event():
    """Return the pending debug event; assert that one exists."""
    ev = ida_dbg.get_debug_event()
    assert ev, "Could not retrieve debug event"
    return ev
def get_event_id():
    """Type (eid) of the last debug event."""
    return _cur_debug_event().eid()
def get_event_pid():
    """Process id of the last debug event."""
    return _cur_debug_event().pid
def get_event_tid():
    """Thread id of the last debug event."""
    return _cur_debug_event().tid
def get_event_ea():
    """Address of the last debug event."""
    return _cur_debug_event().ea
def is_event_handled():
    """Whether the last debug event was handled."""
    return _cur_debug_event().handled
def get_event_module_name():
    """Module name carried by the last debug event."""
    return ida_idd.get_event_module_name(_cur_debug_event())
def get_event_module_base():
    """Module base address carried by the last debug event."""
    return ida_idd.get_event_module_base(_cur_debug_event())
def get_event_module_size():
    """Module size carried by the last debug event."""
    return ida_idd.get_event_module_size(_cur_debug_event())
def get_event_exit_code():
    """Exit code carried by the last debug event."""
    return _cur_debug_event().exit_code()
def get_event_info():
    """Information string carried by the last debug event."""
    return ida_idd.get_event_info(_cur_debug_event())
def get_event_bpt_hea():
    """Hardware breakpoint address carried by the last debug event."""
    return ida_idd.get_event_bpt_hea(_cur_debug_event())
def get_event_exc_code():
    """Exception code carried by the last debug event."""
    return ida_idd.get_event_exc_code(_cur_debug_event())
def get_event_exc_ea():
    """Exception address carried by the last debug event."""
    return ida_idd.get_event_exc_ea(_cur_debug_event())
def can_exc_continue():
    """Whether execution can continue after the last exception event."""
    return ida_idd.can_exc_continue(_cur_debug_event())
def get_event_exc_info():
    """Exception information carried by the last debug event."""
    return ida_idd.get_event_exc_info(_cur_debug_event())
set_debugger_options = ida_dbg.set_debugger_options
# Option bits for set_debugger_options():
DOPT_SEGM_MSGS = 0x00000001
DOPT_START_BPT = 0x00000002
DOPT_THREAD_MSGS = 0x00000004
DOPT_THREAD_BPT = 0x00000008
DOPT_BPT_MSGS = 0x00000010
DOPT_LIB_MSGS = 0x00000040
DOPT_LIB_BPT = 0x00000080
DOPT_INFO_MSGS = 0x00000100
DOPT_INFO_BPT = 0x00000200
DOPT_REAL_MEMORY = 0x00000400
DOPT_REDO_STACK = 0x00000800 # reconstruct the stack
DOPT_ENTRY_BPT = 0x00001000 # break on program entry point
DOPT_EXCDLG = 0x00006000 # exception dialogs:
EXCDLG_NEVER = 0x00000000 # never display exception dialogs
EXCDLG_UNKNOWN = 0x00002000 # display for unknown exceptions
EXCDLG_ALWAYS = 0x00006000 # always display
DOPT_LOAD_DINFO = 0x00008000 # automatically load debug files (pdb)
get_debugger_event_cond = ida_dbg.get_debugger_event_cond
set_debugger_event_cond = ida_dbg.set_debugger_event_cond
set_remote_debugger = ida_dbg.set_remote_debugger
define_exception = ida_dbg.define_exception
# Flags for define_exception():
EXC_BREAK = 0x0001 # break on the exception
EXC_HANDLE = 0x0002 # should be handled by the debugger?
get_reg_value = ida_dbg.get_reg_val
def set_reg_value(value, name):
    """Set a register value.

    NOTE: the (value, name) order is reversed relative to
    ida_dbg.set_reg_val(name, value), for IDC compatibility.
    """
    return ida_dbg.set_reg_val(name, value)
get_bpt_qty = ida_dbg.get_bpt_qty
def get_bpt_ea(n):
    """Address of the n-th breakpoint, or BADADDR if n is out of range."""
    bpt = ida_dbg.bpt_t()
    return bpt.ea if ida_dbg.getn_bpt(n, bpt) else BADADDR
def get_bpt_attr(ea, bptattr):
    """Read an attribute of the breakpoint at ea; -1 on failure.

    For BPTATTR_COND the returned value is the condition string.
    """
    bpt = ida_dbg.bpt_t()
    if not ida_dbg.get_bpt(ea, bpt):
        return -1
    # Map the BPTATTR_... selectors to the corresponding bpt_t fields.
    attr_fields = {
        BPTATTR_EA: "ea",
        BPTATTR_SIZE: "size",
        BPTATTR_TYPE: "type",
        BPTATTR_COUNT: "pass_count",
        BPTATTR_FLAGS: "flags",
        BPTATTR_COND: "condition",
        BPTATTR_PID: "pid",
        BPTATTR_TID: "tid",
    }
    field = attr_fields.get(bptattr)
    return getattr(bpt, field) if field else -1
BPTATTR_EA = 1 # starting address of the breakpoint
BPTATTR_SIZE = 2 # size of the breakpoint (undefined for software breakpoint)
# type of the breakpoint
BPTATTR_TYPE = 3
# Breakpoint types:
BPT_WRITE = 1 # Hardware: Write access
BPT_RDWR = 3 # Hardware: Read/write access
BPT_SOFT = 4 # Software breakpoint
BPT_EXEC = 8 # Hardware: Execute instruction
BPT_DEFAULT = (BPT_SOFT|BPT_EXEC); # Choose bpt type automatically
BPTATTR_COUNT = 4 # pass count (bpt_t.pass_count)
BPTATTR_FLAGS = 5 # breakpoint flags (BPT_... bits below)
BPT_BRK = 0x001 # the debugger stops on this breakpoint
BPT_TRACE = 0x002 # the debugger adds trace information when this breakpoint is reached
BPT_UPDMEM = 0x004 # refresh the memory layout and contents before evaluating bpt condition
BPT_ENABLED = 0x008 # enabled?
BPT_LOWCND = 0x010 # condition is calculated at low level (on the server side)
BPT_TRACEON = 0x020 # enable tracing when the breakpoint is reached
BPT_TRACE_INSN = 0x040 # instruction tracing
BPT_TRACE_FUNC = 0x080 # function tracing
BPT_TRACE_BBLK = 0x100 # basic block tracing
BPTATTR_COND = 6 # Breakpoint condition. NOTE: the return value is a string in this case
BPTATTR_PID = 7 # Breakpoint process id
BPTATTR_TID = 8 # Breakpoint thread id
# Breakpoint location type:
BPLT_ABS = 0 # Absolute address. Attributes:
             # - locinfo: absolute address
BPLT_REL = 1 # Module relative address. Attributes:
             # - locpath: the module path
             # - locinfo: offset from the module base address
BPLT_SYM = 2 # Symbolic name. The name will be resolved on DLL load/unload
             # events and on naming an address. Attributes:
             # - locpath: symbol name
             # - locinfo: offset from the symbol base address
def set_bpt_attr(address, bptattr, value):
    """Modify a writable attribute of the breakpoint at `address`.

    Returns False when there is no breakpoint there or the attribute is
    not settable (BPTATTR_EA and BPTATTR_COND are deliberately excluded).
    """
    bpt = ida_dbg.bpt_t()
    if not ida_dbg.get_bpt(address, bpt):
        return False
    writable_fields = {
        BPTATTR_SIZE: "size",
        BPTATTR_TYPE: "type",
        BPTATTR_COUNT: "pass_count",
        BPTATTR_FLAGS: "flags",
        BPTATTR_PID: "pid",
        BPTATTR_TID: "tid",
    }
    field = writable_fields.get(bptattr)
    if field is None:
        return False
    setattr(bpt, field, value)
    return ida_dbg.update_bpt(bpt)
def set_bpt_cond(ea, cnd, is_lowcnd=0):
    """Attach condition `cnd` to the breakpoint at ea.

    is_lowcnd selects low-level (server-side) evaluation via BPT_LOWCND.
    """
    bp = ida_dbg.bpt_t()
    if not ida_dbg.get_bpt(ea, bp):
        return False
    bp.condition = cnd
    bp.flags = bp.flags | BPT_LOWCND if is_lowcnd else bp.flags & ~BPT_LOWCND
    return ida_dbg.update_bpt(bp)
# Breakpoint manipulation, re-exported from ida_dbg.
add_bpt = ida_dbg.add_bpt
del_bpt = ida_dbg.del_bpt
enable_bpt = ida_dbg.enable_bpt
check_bpt = ida_dbg.check_bpt
# Return values of check_bpt():
BPTCK_NONE = -1 # breakpoint does not exist
BPTCK_NO = 0 # breakpoint is disabled
BPTCK_YES = 1 # breakpoint is enabled
BPTCK_ACT = 2 # breakpoint is active (written to the process)
def enable_tracing(trace_level, enable):
    """Enable or disable tracing at the given level (TRACE_STEP/INSN/FUNC)."""
    assert trace_level in [ TRACE_STEP, TRACE_INSN, TRACE_FUNC ], \
        "trace_level must be one of TRACE_* constants"
    switches = {
        TRACE_STEP: ida_dbg.enable_step_trace,
        TRACE_INSN: ida_dbg.enable_insn_trace,
        TRACE_FUNC: ida_dbg.enable_func_trace,
    }
    switch = switches.get(trace_level)
    return switch(enable) if switch else False
# Trace levels accepted by enable_tracing():
TRACE_STEP = 0x0 # lowest level trace. trace buffers are not maintained
TRACE_INSN = 0x1 # instruction level trace
TRACE_FUNC = 0x2 # function level trace (calls & rets)
get_step_trace_options = ida_dbg.get_step_trace_options
set_step_trace_options = ida_dbg.set_step_trace_options
# Step trace option bits:
ST_OVER_DEBUG_SEG = 0x01 # step tracing will be disabled when IP is in a debugger segment
ST_OVER_LIB_FUNC = 0x02 # step tracing will be disabled when IP is in a library function
ST_ALREADY_LOGGED = 0x04 # step tracing will be disabled when IP is already logged
ST_SKIP_LOOPS = 0x08 # step tracing will try to skip loops already recorded
# Trace file management, re-exported from ida_dbg.
load_trace_file = ida_dbg.load_trace_file
save_trace_file = ida_dbg.save_trace_file
is_valid_trace_file = ida_dbg.is_valid_trace_file
diff_trace_file = ida_dbg.diff_trace_file
def clear_trace(filename):
    """Clear the current trace buffer.

    @param filename: unused; kept only for IDC signature compatibility.
    """
    return ida_dbg.clear_trace()
get_trace_file_desc = ida_dbg.get_trace_file_desc
set_trace_file_desc = ida_dbg.set_trace_file_desc
# Trace event (tev) accessors, re-exported from ida_dbg.
get_tev_qty = ida_dbg.get_tev_qty
get_tev_ea = ida_dbg.get_tev_ea
# Trace event types returned by get_tev_type():
TEV_NONE = 0 # no event
TEV_INSN = 1 # an instruction trace
TEV_CALL = 2 # a function call trace
TEV_RET = 3 # a function return trace
TEV_BPT = 4 # write, read/write, execution trace
TEV_MEM = 5 # memory layout changed
TEV_EVENT = 6 # debug event
get_tev_type = ida_dbg.get_tev_type
get_tev_tid = ida_dbg.get_tev_tid
get_tev_reg = ida_dbg.get_tev_reg_val
get_tev_mem_qty = ida_dbg.get_tev_reg_mem_qty
get_tev_mem = ida_dbg.get_tev_reg_mem
get_tev_mem_ea = ida_dbg.get_tev_reg_mem_ea
get_call_tev_callee = ida_dbg.get_call_tev_callee
get_ret_tev_return = ida_dbg.get_ret_tev_return
get_bpt_tev_ea = ida_dbg.get_bpt_tev_ea
#--------------------------------------------------------------------------
# C O L O R S
#--------------------------------------------------------------------------
def get_color(ea, what):
    """Color of the item, function or segment at ea, selected by `what`
    (one of the CIC_... constants); DEFCOLOR when the object is absent."""
    if what == CIC_ITEM:
        return ida_nalt.get_item_color(ea)
    if what == CIC_FUNC:
        func = ida_funcs.get_func(ea)
        return func.color if func else DEFCOLOR
    if what == CIC_SEGM:
        seg = ida_segment.getseg(ea)
        return seg.color if seg else DEFCOLOR
    raise ValueError("'what' must be one of CIC_ITEM, CIC_FUNC and CIC_SEGM")
# color item codes, used by get_color()/set_color():
CIC_ITEM = 1 # one instruction or data
CIC_FUNC = 2 # function
CIC_SEGM = 3 # segment
DEFCOLOR = 0xFFFFFFFF # Default color
def set_color(ea, what, color):
    """Set the color of the item, function or segment at ea (CIC_...).

    Returns True/False for functions and segments; the item case returns
    whatever ida_nalt.set_item_color() returns.
    """
    if what == CIC_ITEM:
        return ida_nalt.set_item_color(ea, color)
    if what == CIC_FUNC:
        func = ida_funcs.get_func(ea)
        if not func:
            return False
        func.color = color
        return bool(ida_funcs.update_func(func))
    if what == CIC_SEGM:
        seg = ida_segment.getseg(ea)
        if not seg:
            return False
        seg.color = color
        return bool(seg.update())
    raise ValueError("'what' must be one of CIC_ITEM, CIC_FUNC and CIC_SEGM")
#----------------------------------------------------------------------------
# A R M S P E C I F I C
#----------------------------------------------------------------------------
def force_bl_jump(ea):
    """ARM: force the BL instruction at ea to be a jump (via the IDC built-in)."""
    return eval_idc("force_bl_jump(0x%x)"%ea)
def force_bl_call(ea):
    """ARM: force the BL instruction at ea to be a call (via the IDC built-in)."""
    return eval_idc("force_bl_call(0x%x)"%ea)
#--------------------------------------------------------------------------
def set_flag(off, bit, value):
    """Set (value truthy) or clear `bit` in the inf attribute at `off`."""
    current = get_inf_attr(off)
    current = current | bit if value else current & ~bit
    set_inf_attr(off, current)
# Convenience functions:
def here(): return get_screen_ea()  # current cursor address
def is_mapped(ea): return (prev_addr(ea+1)==ea)  # True when ea is a valid address in the database
ARGV = []  # script arguments; presumably filled by the IDC runtime — TODO confirm
# END OF IDC COMPATIBILY CODE
| true | true |
f71ecf2af3703ebe08b3729f80b87a0c1c645aa0 | 16,293 | py | Python | .venv/lib/python3.8/site-packages/google/resumable_media/requests/upload.py | taharh/label-studio | fab68de11bdc6699472d12a78390375928258e1e | [
"Apache-2.0"
] | 2 | 2020-05-17T12:53:06.000Z | 2021-04-12T02:13:43.000Z | .venv/lib/python3.8/site-packages/google/resumable_media/requests/upload.py | taharh/label-studio | fab68de11bdc6699472d12a78390375928258e1e | [
"Apache-2.0"
] | 16 | 2021-03-19T09:44:52.000Z | 2022-03-12T00:22:14.000Z | .venv/lib/python3.8/site-packages/google/resumable_media/requests/upload.py | taharh/label-studio | fab68de11bdc6699472d12a78390375928258e1e | [
"Apache-2.0"
] | 1 | 2021-08-06T04:07:10.000Z | 2021-08-06T04:07:10.000Z | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for resumable uploads.
Also supported here are simple (media) uploads and multipart
uploads that contain both metadata and a small file as payload.
"""
from google.resumable_media import _upload
from google.resumable_media.requests import _helpers
class SimpleUpload(_helpers.RequestsMixin, _upload.SimpleUpload):
    """Upload a resource to a Google API in a single request.

    A **simple** media upload carries no metadata: the payload is sent
    as-is and the upload completes with one HTTP request.

    Args:
        upload_url (str): The URL where the content will be uploaded.
        headers (Optional[Mapping[str, str]]): Extra headers that should
            be sent with the request, e.g. headers for encrypted data.

    Attributes:
        upload_url (str): The URL where the content will be uploaded.
    """

    def transmit(self, transport, data, content_type):
        """Send the resource to the upload URL.

        Args:
            transport (~requests.Session): A ``requests`` object which can
                make authenticated requests.
            data (bytes): The resource content to be uploaded.
            content_type (str): The content type of the resource, e.g. a
                JPEG image has content type ``image/jpeg``.

        Returns:
            ~requests.Response: The HTTP response returned by ``transport``.
        """
        method, url, payload, headers = self._prepare_request(data, content_type)
        result = _helpers.http_request(
            transport,
            method,
            url,
            data=payload,
            headers=headers,
            retry_strategy=self._retry_strategy,
        )
        self._process_response(result)
        return result
class MultipartUpload(_helpers.RequestsMixin, _upload.MultipartUpload):
    """Upload a resource together with its metadata to a Google API.

    A **multipart** upload packs both the metadata and the resource into
    one multipart request.

    Args:
        upload_url (str): The URL where the content will be uploaded.
        headers (Optional[Mapping[str, str]]): Extra headers that should
            be sent with the request, e.g. headers for encrypted data.

    Attributes:
        upload_url (str): The URL where the content will be uploaded.
    """

    def transmit(self, transport, data, metadata, content_type):
        """Send the resource and its metadata to the upload URL.

        Args:
            transport (~requests.Session): A ``requests`` object which can
                make authenticated requests.
            data (bytes): The resource content to be uploaded.
            metadata (Mapping[str, str]): The resource metadata, such as an
                ACL list.
            content_type (str): The content type of the resource, e.g. a
                JPEG image has content type ``image/jpeg``.

        Returns:
            ~requests.Response: The HTTP response returned by ``transport``.
        """
        method, url, payload, headers = self._prepare_request(
            data, metadata, content_type
        )
        result = _helpers.http_request(
            transport,
            method,
            url,
            data=payload,
            headers=headers,
            retry_strategy=self._retry_strategy,
        )
        self._process_response(result)
        return result
class ResumableUpload(_helpers.RequestsMixin, _upload.ResumableUpload):
"""Initiate and fulfill a resumable upload to a Google API.
A **resumable** upload sends an initial request with the resource metadata
and then gets assigned an upload ID / upload URL to send bytes to.
Using the upload URL, the upload is then done in chunks (determined by
the user) until all bytes have been uploaded.
When constructing a resumable upload, only the resumable upload URL and
the chunk size are required:
.. testsetup:: resumable-constructor
bucket = u'bucket-foo'
.. doctest:: resumable-constructor
>>> from google.resumable_media.requests import ResumableUpload
>>>
>>> url_template = (
... u'https://www.googleapis.com/upload/storage/v1/b/{bucket}/o?'
... u'uploadType=resumable')
>>> upload_url = url_template.format(bucket=bucket)
>>>
>>> chunk_size = 3 * 1024 * 1024 # 3MB
>>> upload = ResumableUpload(upload_url, chunk_size)
When initiating an upload (via :meth:`initiate`), the caller is expected
to pass the resource being uploaded as a file-like ``stream``. If the size
of the resource is explicitly known, it can be passed in directly:
.. testsetup:: resumable-explicit-size
import os
import tempfile
import mock
import requests
from six.moves import http_client
from google.resumable_media.requests import ResumableUpload
upload_url = u'http://test.invalid'
chunk_size = 3 * 1024 * 1024 # 3MB
upload = ResumableUpload(upload_url, chunk_size)
file_desc, filename = tempfile.mkstemp()
os.close(file_desc)
data = b'some bytes!'
with open(filename, u'wb') as file_obj:
file_obj.write(data)
fake_response = requests.Response()
fake_response.status_code = int(http_client.OK)
fake_response._content = b''
resumable_url = u'http://test.invalid?upload_id=7up'
fake_response.headers[u'location'] = resumable_url
post_method = mock.Mock(return_value=fake_response, spec=[])
transport = mock.Mock(request=post_method, spec=['request'])
.. doctest:: resumable-explicit-size
>>> import os
>>>
>>> upload.total_bytes is None
True
>>>
>>> stream = open(filename, u'rb')
>>> total_bytes = os.path.getsize(filename)
>>> metadata = {u'name': filename}
>>> response = upload.initiate(
... transport, stream, metadata, u'text/plain',
... total_bytes=total_bytes)
>>> response
<Response [200]>
>>>
>>> upload.total_bytes == total_bytes
True
.. testcleanup:: resumable-explicit-size
os.remove(filename)
If the stream is in a "final" state (i.e. it won't have any more bytes
written to it), the total number of bytes can be determined implicitly
from the ``stream`` itself:
.. testsetup:: resumable-implicit-size
import io
import mock
import requests
from six.moves import http_client
from google.resumable_media.requests import ResumableUpload
upload_url = u'http://test.invalid'
chunk_size = 3 * 1024 * 1024 # 3MB
upload = ResumableUpload(upload_url, chunk_size)
fake_response = requests.Response()
fake_response.status_code = int(http_client.OK)
fake_response._content = b''
resumable_url = u'http://test.invalid?upload_id=7up'
fake_response.headers[u'location'] = resumable_url
post_method = mock.Mock(return_value=fake_response, spec=[])
transport = mock.Mock(request=post_method, spec=['request'])
data = b'some MOAR bytes!'
metadata = {u'name': u'some-file.jpg'}
content_type = u'image/jpeg'
.. doctest:: resumable-implicit-size
>>> stream = io.BytesIO(data)
>>> response = upload.initiate(
... transport, stream, metadata, content_type)
>>>
>>> upload.total_bytes == len(data)
True
If the size of the resource is **unknown** when the upload is initiated,
the ``stream_final`` argument can be used. This might occur if the
resource is being dynamically created on the client (e.g. application
logs). To use this argument:
.. testsetup:: resumable-unknown-size
import io
import mock
import requests
from six.moves import http_client
from google.resumable_media.requests import ResumableUpload
upload_url = u'http://test.invalid'
chunk_size = 3 * 1024 * 1024 # 3MB
upload = ResumableUpload(upload_url, chunk_size)
fake_response = requests.Response()
fake_response.status_code = int(http_client.OK)
fake_response._content = b''
resumable_url = u'http://test.invalid?upload_id=7up'
fake_response.headers[u'location'] = resumable_url
post_method = mock.Mock(return_value=fake_response, spec=[])
transport = mock.Mock(request=post_method, spec=['request'])
metadata = {u'name': u'some-file.jpg'}
content_type = u'application/octet-stream'
stream = io.BytesIO(b'data')
.. doctest:: resumable-unknown-size
>>> response = upload.initiate(
... transport, stream, metadata, content_type,
... stream_final=False)
>>>
>>> upload.total_bytes is None
True
Args:
upload_url (str): The URL where the resumable upload will be initiated.
chunk_size (int): The size of each chunk used to upload the resource.
headers (Optional[Mapping[str, str]]): Extra headers that should
be sent with the :meth:`initiate` request, e.g. headers for
encrypted data. These **will not** be sent with
:meth:`transmit_next_chunk` or :meth:`recover` requests.
Attributes:
upload_url (str): The URL where the content will be uploaded.
Raises:
ValueError: If ``chunk_size`` is not a multiple of
:data:`.UPLOAD_CHUNK_SIZE`.
"""
def initiate(
self,
transport,
stream,
metadata,
content_type,
total_bytes=None,
stream_final=True,
):
"""Initiate a resumable upload.
By default, this method assumes your ``stream`` is in a "final"
state ready to transmit. However, ``stream_final=False`` can be used
to indicate that the size of the resource is not known. This can happen
if bytes are being dynamically fed into ``stream``, e.g. if the stream
is attached to application logs.
If ``stream_final=False`` is used, :attr:`chunk_size` bytes will be
read from the stream every time :meth:`transmit_next_chunk` is called.
If one of those reads produces strictly fewer bites than the chunk
size, the upload will be concluded.
Args:
transport (~requests.Session): A ``requests`` object which can
make authenticated requests.
stream (IO[bytes]): The stream (i.e. file-like object) that will
be uploaded. The stream **must** be at the beginning (i.e.
``stream.tell() == 0``).
metadata (Mapping[str, str]): The resource metadata, such as an
ACL list.
content_type (str): The content type of the resource, e.g. a JPEG
image has content type ``image/jpeg``.
total_bytes (Optional[int]): The total number of bytes to be
uploaded. If specified, the upload size **will not** be
determined from the stream (even if ``stream_final=True``).
stream_final (Optional[bool]): Indicates if the ``stream`` is
"final" (i.e. no more bytes will be added to it). In this case
we determine the upload size from the size of the stream. If
``total_bytes`` is passed, this argument will be ignored.
Returns:
~requests.Response: The HTTP response returned by ``transport``.
"""
method, url, payload, headers = self._prepare_initiate_request(
stream,
metadata,
content_type,
total_bytes=total_bytes,
stream_final=stream_final,
)
response = _helpers.http_request(
transport,
method,
url,
data=payload,
headers=headers,
retry_strategy=self._retry_strategy,
)
self._process_initiate_response(response)
return response
def transmit_next_chunk(self, transport):
"""Transmit the next chunk of the resource to be uploaded.
If the current upload was initiated with ``stream_final=False``,
this method will dynamically determine if the upload has completed.
The upload will be considered complete if the stream produces
fewer than :attr:`chunk_size` bytes when a chunk is read from it.
In the case of failure, an exception is thrown that preserves the
failed response:
.. testsetup:: bad-response
import io
import mock
import requests
from six.moves import http_client
from google import resumable_media
import google.resumable_media.requests.upload as upload_mod
transport = mock.Mock(spec=['request'])
fake_response = requests.Response()
fake_response.status_code = int(http_client.BAD_REQUEST)
transport.request.return_value = fake_response
upload_url = u'http://test.invalid'
upload = upload_mod.ResumableUpload(
upload_url, resumable_media.UPLOAD_CHUNK_SIZE)
# Fake that the upload has been initiate()-d
data = b'data is here'
upload._stream = io.BytesIO(data)
upload._total_bytes = len(data)
upload._resumable_url = u'http://test.invalid?upload_id=nope'
.. doctest:: bad-response
:options: +NORMALIZE_WHITESPACE
>>> error = None
>>> try:
... upload.transmit_next_chunk(transport)
... except resumable_media.InvalidResponse as caught_exc:
... error = caught_exc
...
>>> error
InvalidResponse('Request failed with status code', 400,
'Expected one of', <HTTPStatus.OK: 200>, 308)
>>> error.response
<Response [400]>
Args:
transport (~requests.Session): A ``requests`` object which can
make authenticated requests.
Returns:
~requests.Response: The HTTP response returned by ``transport``.
Raises:
~google.resumable_media.common.InvalidResponse: If the status
code is not 200 or 308.
"""
method, url, payload, headers = self._prepare_request()
response = _helpers.http_request(
transport,
method,
url,
data=payload,
headers=headers,
retry_strategy=self._retry_strategy,
)
self._process_response(response, len(payload))
return response
def recover(self, transport):
"""Recover from a failure.
This method should be used when a :class:`ResumableUpload` is in an
:attr:`~ResumableUpload.invalid` state due to a request failure.
This will verify the progress with the server and make sure the
current upload is in a valid state before :meth:`transmit_next_chunk`
can be used again.
Args:
transport (~requests.Session): A ``requests`` object which can
make authenticated requests.
Returns:
~requests.Response: The HTTP response returned by ``transport``.
"""
method, url, payload, headers = self._prepare_recover_request()
# NOTE: We assume "payload is None" but pass it along anyway.
response = _helpers.http_request(
transport,
method,
url,
data=payload,
headers=headers,
retry_strategy=self._retry_strategy,
)
self._process_recover_response(response)
return response
| 35.496732 | 81 | 0.624072 |
from google.resumable_media import _upload
from google.resumable_media.requests import _helpers
class SimpleUpload(_helpers.RequestsMixin, _upload.SimpleUpload):
def transmit(self, transport, data, content_type):
method, url, payload, headers = self._prepare_request(data, content_type)
response = _helpers.http_request(
transport,
method,
url,
data=payload,
headers=headers,
retry_strategy=self._retry_strategy,
)
self._process_response(response)
return response
class MultipartUpload(_helpers.RequestsMixin, _upload.MultipartUpload):
def transmit(self, transport, data, metadata, content_type):
method, url, payload, headers = self._prepare_request(
data, metadata, content_type
)
response = _helpers.http_request(
transport,
method,
url,
data=payload,
headers=headers,
retry_strategy=self._retry_strategy,
)
self._process_response(response)
return response
class ResumableUpload(_helpers.RequestsMixin, _upload.ResumableUpload):
def initiate(
self,
transport,
stream,
metadata,
content_type,
total_bytes=None,
stream_final=True,
):
method, url, payload, headers = self._prepare_initiate_request(
stream,
metadata,
content_type,
total_bytes=total_bytes,
stream_final=stream_final,
)
response = _helpers.http_request(
transport,
method,
url,
data=payload,
headers=headers,
retry_strategy=self._retry_strategy,
)
self._process_initiate_response(response)
return response
def transmit_next_chunk(self, transport):
method, url, payload, headers = self._prepare_request()
response = _helpers.http_request(
transport,
method,
url,
data=payload,
headers=headers,
retry_strategy=self._retry_strategy,
)
self._process_response(response, len(payload))
return response
def recover(self, transport):
method, url, payload, headers = self._prepare_recover_request()
response = _helpers.http_request(
transport,
method,
url,
data=payload,
headers=headers,
retry_strategy=self._retry_strategy,
)
self._process_recover_response(response)
return response
| true | true |
f71ecf71aebb9e5ffec8c7a30cb6b389a0ce74cf | 64,303 | py | Python | Lib/site-packages/sqlalchemy/orm/properties.py | mariorodolfo/arrow | aa8085fd69a089650935e65e397447df8772f2b5 | [
"BSD-3-Clause"
] | 2 | 2015-11-05T09:43:45.000Z | 2017-05-31T14:22:02.000Z | Lib/site-packages/sqlalchemy/orm/properties.py | mariorodolfo/arrow | aa8085fd69a089650935e65e397447df8772f2b5 | [
"BSD-3-Clause"
] | null | null | null | Lib/site-packages/sqlalchemy/orm/properties.py | mariorodolfo/arrow | aa8085fd69a089650935e65e397447df8772f2b5 | [
"BSD-3-Clause"
] | null | null | null | # orm/properties.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""MapperProperty implementations.
This is a private module which defines the behavior of invidual ORM-
mapped attributes.
"""
from sqlalchemy import sql, util, log, exc as sa_exc
from sqlalchemy.sql.util import ClauseAdapter, criterion_as_pairs, \
join_condition, _shallow_annotate
from sqlalchemy.sql import operators, expression
from sqlalchemy.orm import attributes, dependency, mapper, \
object_mapper, strategies, configure_mappers
from sqlalchemy.orm.util import CascadeOptions, _class_to_mapper, \
_orm_annotate, _orm_deannotate
from sqlalchemy.orm.interfaces import MANYTOMANY, MANYTOONE, \
MapperProperty, ONETOMANY, PropComparator, StrategizedProperty
mapperlib = util.importlater("sqlalchemy.orm", "mapperlib")
NoneType = type(None)
__all__ = ('ColumnProperty', 'CompositeProperty', 'SynonymProperty',
'ComparableProperty', 'RelationshipProperty', 'RelationProperty')
from descriptor_props import CompositeProperty, SynonymProperty, \
ComparableProperty,ConcreteInheritedProperty
class ColumnProperty(StrategizedProperty):
"""Describes an object attribute that corresponds to a table column.
Public constructor is the :func:`.orm.column_property` function.
"""
def __init__(self, *columns, **kwargs):
"""Construct a ColumnProperty.
Note the public constructor is the :func:`.orm.column_property` function.
:param \*columns: The list of `columns` describes a single
object property. If there are multiple tables joined
together for the mapper, this list represents the equivalent
column as it appears across each table.
:param group:
:param deferred:
:param comparator_factory:
:param descriptor:
:param expire_on_flush:
:param extension:
"""
self._orig_columns = [expression._labeled(c) for c in columns]
self.columns = [expression._labeled(_orm_deannotate(c))
for c in columns]
self.group = kwargs.pop('group', None)
self.deferred = kwargs.pop('deferred', False)
self.instrument = kwargs.pop('_instrument', True)
self.comparator_factory = kwargs.pop('comparator_factory',
self.__class__.Comparator)
self.descriptor = kwargs.pop('descriptor', None)
self.extension = kwargs.pop('extension', None)
self.active_history = kwargs.pop('active_history', False)
self.expire_on_flush = kwargs.pop('expire_on_flush', True)
if 'doc' in kwargs:
self.doc = kwargs.pop('doc')
else:
for col in reversed(self.columns):
doc = getattr(col, 'doc', None)
if doc is not None:
self.doc = doc
break
else:
self.doc = None
if kwargs:
raise TypeError(
"%s received unexpected keyword argument(s): %s" % (
self.__class__.__name__,
', '.join(sorted(kwargs.keys()))))
util.set_creation_order(self)
if not self.instrument:
self.strategy_class = strategies.UninstrumentedColumnLoader
elif self.deferred:
self.strategy_class = strategies.DeferredColumnLoader
else:
self.strategy_class = strategies.ColumnLoader
def instrument_class(self, mapper):
if not self.instrument:
return
attributes.register_descriptor(
mapper.class_,
self.key,
comparator=self.comparator_factory(self, mapper),
parententity=mapper,
doc=self.doc
)
def do_init(self):
super(ColumnProperty, self).do_init()
if len(self.columns) > 1 and \
set(self.parent.primary_key).issuperset(self.columns):
util.warn(
("On mapper %s, primary key column '%s' is being combined "
"with distinct primary key column '%s' in attribute '%s'. "
"Use explicit properties to give each column its own mapped "
"attribute name.") % (self.parent, self.columns[1],
self.columns[0], self.key))
def copy(self):
return ColumnProperty(
deferred=self.deferred,
group=self.group,
active_history=self.active_history,
*self.columns)
def _getcommitted(self, state, dict_, column,
passive=attributes.PASSIVE_OFF):
return state.get_impl(self.key).\
get_committed_value(state, dict_, passive=passive)
def merge(self, session, source_state, source_dict, dest_state,
dest_dict, load, _recursive):
if not self.instrument:
return
elif self.key in source_dict:
value = source_dict[self.key]
if not load:
dest_dict[self.key] = value
else:
impl = dest_state.get_impl(self.key)
impl.set(dest_state, dest_dict, value, None)
elif dest_state.has_identity and self.key not in dest_dict:
dest_state.expire_attributes(dest_dict, [self.key])
class Comparator(PropComparator):
@util.memoized_instancemethod
def __clause_element__(self):
if self.adapter:
return self.adapter(self.prop.columns[0])
else:
return self.prop.columns[0]._annotate({
"parententity": self.mapper,
"parentmapper":self.mapper})
def operate(self, op, *other, **kwargs):
return op(self.__clause_element__(), *other, **kwargs)
def reverse_operate(self, op, other, **kwargs):
col = self.__clause_element__()
return op(col._bind_param(op, other), col, **kwargs)
# TODO: legacy..do we need this ? (0.5)
ColumnComparator = Comparator
def __str__(self):
return str(self.parent.class_.__name__) + "." + self.key
log.class_logger(ColumnProperty)
class RelationshipProperty(StrategizedProperty):
"""Describes an object property that holds a single item or list
of items that correspond to a related database table.
Public constructor is the :func:`.orm.relationship` function.
Of note here is the :class:`.RelationshipProperty.Comparator`
class, which implements comparison operations for scalar-
and collection-referencing mapped attributes.
"""
strategy_wildcard_key = 'relationship:*'
def __init__(self, argument,
secondary=None, primaryjoin=None,
secondaryjoin=None,
foreign_keys=None,
uselist=None,
order_by=False,
backref=None,
back_populates=None,
post_update=False,
cascade=False, extension=None,
viewonly=False, lazy=True,
collection_class=None, passive_deletes=False,
passive_updates=True, remote_side=None,
enable_typechecks=True, join_depth=None,
comparator_factory=None,
single_parent=False, innerjoin=False,
doc=None,
active_history=False,
cascade_backrefs=True,
load_on_pending=False,
strategy_class=None, _local_remote_pairs=None,
query_class=None):
self.uselist = uselist
self.argument = argument
self.secondary = secondary
self.primaryjoin = primaryjoin
self.secondaryjoin = secondaryjoin
self.post_update = post_update
self.direction = None
self.viewonly = viewonly
self.lazy = lazy
self.single_parent = single_parent
self._user_defined_foreign_keys = foreign_keys
self.collection_class = collection_class
self.passive_deletes = passive_deletes
self.cascade_backrefs = cascade_backrefs
self.passive_updates = passive_updates
self.remote_side = remote_side
self.enable_typechecks = enable_typechecks
self.query_class = query_class
self.innerjoin = innerjoin
self.doc = doc
self.active_history = active_history
self.join_depth = join_depth
self.local_remote_pairs = _local_remote_pairs
self.extension = extension
self.load_on_pending = load_on_pending
self.comparator_factory = comparator_factory or \
RelationshipProperty.Comparator
self.comparator = self.comparator_factory(self, None)
util.set_creation_order(self)
if strategy_class:
self.strategy_class = strategy_class
elif self.lazy== 'dynamic':
from sqlalchemy.orm import dynamic
self.strategy_class = dynamic.DynaLoader
else:
self.strategy_class = strategies.factory(self.lazy)
self._reverse_property = set()
if cascade is not False:
self.cascade = CascadeOptions(cascade)
else:
self.cascade = CascadeOptions("save-update, merge")
if self.passive_deletes == 'all' and \
("delete" in self.cascade or
"delete-orphan" in self.cascade):
raise sa_exc.ArgumentError(
"Can't set passive_deletes='all' in conjunction "
"with 'delete' or 'delete-orphan' cascade")
self.order_by = order_by
self.back_populates = back_populates
if self.back_populates:
if backref:
raise sa_exc.ArgumentError(
"backref and back_populates keyword arguments "
"are mutually exclusive")
self.backref = None
else:
self.backref = backref
def instrument_class(self, mapper):
attributes.register_descriptor(
mapper.class_,
self.key,
comparator=self.comparator_factory(self, mapper),
parententity=mapper,
doc=self.doc,
)
class Comparator(PropComparator):
"""Produce comparison operations for :func:`~.orm.relationship`-based
attributes."""
def __init__(self, prop, mapper, of_type=None, adapter=None):
"""Construction of :class:`.RelationshipProperty.Comparator`
is internal to the ORM's attribute mechanics.
"""
self.prop = prop
self.mapper = mapper
self.adapter = adapter
if of_type:
self._of_type = _class_to_mapper(of_type)
def adapted(self, adapter):
"""Return a copy of this PropComparator which will use the
given adaption function on the local side of generated
expressions.
"""
return self.__class__(self.property, self.mapper,
getattr(self, '_of_type', None),
adapter)
@property
def parententity(self):
return self.property.parent
def __clause_element__(self):
elem = self.property.parent._with_polymorphic_selectable
if self.adapter:
return self.adapter(elem)
else:
return elem
def of_type(self, cls):
"""Produce a construct that represents a particular 'subtype' of
attribute for the parent class.
Currently this is usable in conjunction with :meth:`.Query.join`
and :meth:`.Query.outerjoin`.
"""
return RelationshipProperty.Comparator(
self.property,
self.mapper,
cls, adapter=self.adapter)
def in_(self, other):
"""Produce an IN clause - this is not implemented
for :func:`~.orm.relationship`-based attributes at this time.
"""
raise NotImplementedError('in_() not yet supported for '
'relationships. For a simple many-to-one, use '
'in_() against the set of foreign key values.')
__hash__ = None
def __eq__(self, other):
"""Implement the ``==`` operator.
In a many-to-one context, such as::
MyClass.some_prop == <some object>
this will typically produce a
clause such as::
mytable.related_id == <some id>
Where ``<some id>`` is the primary key of the given
object.
The ``==`` operator provides partial functionality for non-
many-to-one comparisons:
* Comparisons against collections are not supported.
Use :meth:`~.RelationshipProperty.Comparator.contains`.
* Compared to a scalar one-to-many, will produce a
clause that compares the target columns in the parent to
the given target.
* Compared to a scalar many-to-many, an alias
of the association table will be rendered as
well, forming a natural join that is part of the
main body of the query. This will not work for
queries that go beyond simple AND conjunctions of
comparisons, such as those which use OR. Use
explicit joins, outerjoins, or
:meth:`~.RelationshipProperty.Comparator.has` for
more comprehensive non-many-to-one scalar
membership tests.
* Comparisons against ``None`` given in a one-to-many
or many-to-many context produce a NOT EXISTS clause.
"""
if isinstance(other, (NoneType, expression._Null)):
if self.property.direction in [ONETOMANY, MANYTOMANY]:
return ~self._criterion_exists()
else:
return _orm_annotate(self.property._optimized_compare(
None, adapt_source=self.adapter))
elif self.property.uselist:
raise sa_exc.InvalidRequestError("Can't compare a colle"
"ction to an object or collection; use "
"contains() to test for membership.")
else:
return _orm_annotate(self.property._optimized_compare(other,
adapt_source=self.adapter))
def _criterion_exists(self, criterion=None, **kwargs):
if getattr(self, '_of_type', None):
target_mapper = self._of_type
to_selectable = target_mapper._with_polymorphic_selectable
if self.property._is_self_referential:
to_selectable = to_selectable.alias()
single_crit = target_mapper._single_table_criterion
if single_crit is not None:
if criterion is not None:
criterion = single_crit & criterion
else:
criterion = single_crit
else:
to_selectable = None
if self.adapter:
source_selectable = self.__clause_element__()
else:
source_selectable = None
pj, sj, source, dest, secondary, target_adapter = \
self.property._create_joins(dest_polymorphic=True,
dest_selectable=to_selectable,
source_selectable=source_selectable)
for k in kwargs:
crit = getattr(self.property.mapper.class_, k) == kwargs[k]
if criterion is None:
criterion = crit
else:
criterion = criterion & crit
# annotate the *local* side of the join condition, in the case
# of pj + sj this is the full primaryjoin, in the case of just
# pj its the local side of the primaryjoin.
if sj is not None:
j = _orm_annotate(pj) & sj
else:
j = _orm_annotate(pj, exclude=self.property.remote_side)
if criterion is not None and target_adapter:
# limit this adapter to annotated only?
criterion = target_adapter.traverse(criterion)
# only have the "joined left side" of what we
# return be subject to Query adaption. The right
# side of it is used for an exists() subquery and
# should not correlate or otherwise reach out
# to anything in the enclosing query.
if criterion is not None:
criterion = criterion._annotate({'no_replacement_traverse': True})
crit = j & criterion
return sql.exists([1], crit, from_obj=dest).\
correlate(source._annotate({'_orm_adapt':True}))
def any(self, criterion=None, **kwargs):
"""Produce an expression that tests a collection against
particular criterion, using EXISTS.
An expression like::
session.query(MyClass).filter(
MyClass.somereference.any(SomeRelated.x==2)
)
Will produce a query like::
SELECT * FROM my_table WHERE
EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id
AND related.x=2)
Because :meth:`~.RelationshipProperty.Comparator.any` uses
a correlated subquery, its performance is not nearly as
good when compared against large target tables as that of
using a join.
:meth:`~.RelationshipProperty.Comparator.any` is particularly
useful for testing for empty collections::
session.query(MyClass).filter(
~MyClass.somereference.any()
)
will produce::
SELECT * FROM my_table WHERE
NOT EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id)
:meth:`~.RelationshipProperty.Comparator.any` is only
valid for collections, i.e. a :func:`.relationship`
that has ``uselist=True``. For scalar references,
use :meth:`~.RelationshipProperty.Comparator.has`.
"""
if not self.property.uselist:
raise sa_exc.InvalidRequestError(
"'any()' not implemented for scalar "
"attributes. Use has()."
)
return self._criterion_exists(criterion, **kwargs)
def has(self, criterion=None, **kwargs):
"""Produce an expression that tests a scalar reference against
particular criterion, using EXISTS.
An expression like::
session.query(MyClass).filter(
MyClass.somereference.has(SomeRelated.x==2)
)
Will produce a query like::
SELECT * FROM my_table WHERE
EXISTS (SELECT 1 FROM related WHERE related.id==my_table.related_id
AND related.x=2)
Because :meth:`~.RelationshipProperty.Comparator.has` uses
a correlated subquery, its performance is not nearly as
good when compared against large target tables as that of
using a join.
:meth:`~.RelationshipProperty.Comparator.has` is only
valid for scalar references, i.e. a :func:`.relationship`
that has ``uselist=False``. For collection references,
use :meth:`~.RelationshipProperty.Comparator.any`.
"""
if self.property.uselist:
raise sa_exc.InvalidRequestError(
"'has()' not implemented for collections. "
"Use any().")
return self._criterion_exists(criterion, **kwargs)
def contains(self, other, **kwargs):
"""Return a simple expression that tests a collection for
containment of a particular item.
:meth:`~.RelationshipProperty.Comparator.contains` is
only valid for a collection, i.e. a
:func:`~.orm.relationship` that implements
one-to-many or many-to-many with ``uselist=True``.
When used in a simple one-to-many context, an
expression like::
MyClass.contains(other)
Produces a clause like::
mytable.id == <some id>
Where ``<some id>`` is the value of the foreign key
attribute on ``other`` which refers to the primary
key of its parent object. From this it follows that
:meth:`~.RelationshipProperty.Comparator.contains` is
very useful when used with simple one-to-many
operations.
For many-to-many operations, the behavior of
:meth:`~.RelationshipProperty.Comparator.contains`
has more caveats. The association table will be
rendered in the statement, producing an "implicit"
join, that is, includes multiple tables in the FROM
clause which are equated in the WHERE clause::
query(MyClass).filter(MyClass.contains(other))
Produces a query like::
SELECT * FROM my_table, my_association_table AS
my_association_table_1 WHERE
my_table.id = my_association_table_1.parent_id
AND my_association_table_1.child_id = <some id>
Where ``<some id>`` would be the primary key of
``other``. From the above, it is clear that
:meth:`~.RelationshipProperty.Comparator.contains`
will **not** work with many-to-many collections when
used in queries that move beyond simple AND
conjunctions, such as multiple
:meth:`~.RelationshipProperty.Comparator.contains`
expressions joined by OR. In such cases subqueries or
explicit "outer joins" will need to be used instead.
See :meth:`~.RelationshipProperty.Comparator.any` for
a less-performant alternative using EXISTS, or refer
to :meth:`.Query.outerjoin` as well as :ref:`ormtutorial_joins`
for more details on constructing outer joins.
"""
if not self.property.uselist:
raise sa_exc.InvalidRequestError(
"'contains' not implemented for scalar "
"attributes. Use ==")
clause = self.property._optimized_compare(other,
adapt_source=self.adapter)
if self.property.secondaryjoin is not None:
clause.negation_clause = \
self.__negated_contains_or_equals(other)
return clause
def __negated_contains_or_equals(self, other):
if self.property.direction == MANYTOONE:
state = attributes.instance_state(other)
def state_bindparam(x, state, col):
o = state.obj() # strong ref
return sql.bindparam(x, unique=True, callable_=lambda : \
self.property.mapper._get_committed_attr_by_column(o,
col))
def adapt(col):
if self.adapter:
return self.adapter(col)
else:
return col
if self.property._use_get:
return sql.and_(*[
sql.or_(
adapt(x) != state_bindparam(adapt(x), state, y),
adapt(x) == None)
for (x, y) in self.property.local_remote_pairs])
criterion = sql.and_(*[x==y for (x, y) in
zip(
self.property.mapper.primary_key,
self.property.\
mapper.\
primary_key_from_instance(other))
])
return ~self._criterion_exists(criterion)
def __ne__(self, other):
"""Implement the ``!=`` operator.
In a many-to-one context, such as::
MyClass.some_prop != <some object>
This will typically produce a clause such as::
mytable.related_id != <some id>
Where ``<some id>`` is the primary key of the
given object.
The ``!=`` operator provides partial functionality for non-
many-to-one comparisons:
* Comparisons against collections are not supported.
Use
:meth:`~.RelationshipProperty.Comparator.contains`
in conjunction with :func:`~.expression.not_`.
* Compared to a scalar one-to-many, will produce a
clause that compares the target columns in the parent to
the given target.
* Compared to a scalar many-to-many, an alias
of the association table will be rendered as
well, forming a natural join that is part of the
main body of the query. This will not work for
queries that go beyond simple AND conjunctions of
comparisons, such as those which use OR. Use
explicit joins, outerjoins, or
:meth:`~.RelationshipProperty.Comparator.has` in
conjunction with :func:`~.expression.not_` for
more comprehensive non-many-to-one scalar
membership tests.
* Comparisons against ``None`` given in a one-to-many
or many-to-many context produce an EXISTS clause.
"""
if isinstance(other, (NoneType, expression._Null)):
if self.property.direction == MANYTOONE:
return sql.or_(*[x != None for x in
self.property._calculated_foreign_keys])
else:
return self._criterion_exists()
elif self.property.uselist:
raise sa_exc.InvalidRequestError("Can't compare a collection"
" to an object or collection; use "
"contains() to test for membership.")
else:
return self.__negated_contains_or_equals(other)
@util.memoized_property
def property(self):
if mapperlib.module._new_mappers:
configure_mappers()
return self.prop
def compare(self, op, value,
value_is_parent=False,
alias_secondary=True):
if op == operators.eq:
if value is None:
if self.uselist:
return ~sql.exists([1], self.primaryjoin)
else:
return self._optimized_compare(None,
value_is_parent=value_is_parent,
alias_secondary=alias_secondary)
else:
return self._optimized_compare(value,
value_is_parent=value_is_parent,
alias_secondary=alias_secondary)
else:
return op(self.comparator, value)
def _optimized_compare(self, value, value_is_parent=False,
adapt_source=None,
alias_secondary=True):
if value is not None:
value = attributes.instance_state(value)
return self._get_strategy(strategies.LazyLoader).lazy_clause(value,
reverse_direction=not value_is_parent,
alias_secondary=alias_secondary,
adapt_source=adapt_source)
def __str__(self):
return str(self.parent.class_.__name__) + "." + self.key
def merge(self,
            session,
            source_state,
            source_dict,
            dest_state,
            dest_dict,
            load, _recursive):
    """Merge the value of this relationship from ``source`` onto
    ``dest`` as part of :meth:`.Session.merge`, recursing into
    related objects and guarding against backref cycles via
    ``_recursive``."""
    if load:
        # a cycle check: if our reverse property already merged this
        # state, stop here rather than recursing forever.
        for r in self._reverse_property:
            if (source_state, r) in _recursive:
                return

    if not "merge" in self.cascade:
        return

    if self.key not in source_dict:
        return

    if self.uselist:
        instances = source_state.get_impl(self.key).\
                        get(source_state, source_dict)
        if hasattr(instances, '_sa_adapter'):
            # convert collections to adapters to get a true iterator
            instances = instances._sa_adapter

        if load:
            # for a full merge, pre-load the destination collection,
            # so that individual _merge of each item pulls from identity
            # map for those already present.
            # also assumes CollectionAttributeImpl behavior of loading
            # "old" list in any case
            dest_state.get_impl(self.key).get(dest_state, dest_dict)

        dest_list = []
        for current in instances:
            current_state = attributes.instance_state(current)
            current_dict = attributes.instance_dict(current)
            # record that this state/property pair is in progress
            _recursive[(current_state, self)] = True
            obj = session._merge(current_state, current_dict,
                                load=load, _recursive=_recursive)
            if obj is not None:
                dest_list.append(obj)

        if not load:
            # "dirty load-less" merge: populate the collection
            # without firing attribute events
            coll = attributes.init_state_collection(dest_state,
                    dest_dict, self.key)
            for c in dest_list:
                coll.append_without_event(c)
        else:
            dest_state.get_impl(self.key)._set_iterable(dest_state,
                    dest_dict, dest_list)
    else:
        # scalar (many-to-one / one-to-one) case
        current = source_dict[self.key]
        if current is not None:
            current_state = attributes.instance_state(current)
            current_dict = attributes.instance_dict(current)
            _recursive[(current_state, self)] = True
            obj = session._merge(current_state, current_dict,
                    load=load, _recursive=_recursive)
        else:
            obj = None

        if not load:
            dest_dict[self.key] = obj
        else:
            dest_state.get_impl(self.key).set(dest_state,
                    dest_dict, obj, None)
def cascade_iterator(self, type_, state, dict_, visited_states, halt_on=None):
    """Yield ``(instance, mapper, state, dict)`` tuples for each
    related object along this relationship to which the cascade
    operation ``type_`` should propagate."""
    #assert type_ in self.cascade

    # only actively lazy load on the 'delete' cascade
    if type_ != 'delete' or self.passive_deletes:
        passive = attributes.PASSIVE_NO_INITIALIZE
    else:
        passive = attributes.PASSIVE_OFF

    if type_ == 'save-update':
        # include pending additions/removals, not just the loaded value
        tuples = state.manager[self.key].impl.\
                    get_all_pending(state, dict_)
    else:
        tuples = state.value_as_iterable(dict_, self.key,
                        passive=passive)

    skip_pending = type_ == 'refresh-expire' and 'delete-orphan' \
        not in self.cascade

    for instance_state, c in tuples:
        if instance_state in visited_states:
            continue

        if c is None:
            # would like to emit a warning here, but
            # would not be consistent with collection.append(None)
            # current behavior of silently skipping.
            # see [ticket:2229]
            continue

        instance_dict = attributes.instance_dict(c)

        if halt_on and halt_on(instance_state):
            continue

        if skip_pending and not instance_state.key:
            continue

        instance_mapper = instance_state.manager.mapper

        if not instance_mapper.isa(self.mapper.class_manager.mapper):
            raise AssertionError("Attribute '%s' on class '%s' "
                                "doesn't handle objects "
                                "of type '%s'" % (
                                    self.key,
                                    self.parent.class_,
                                    c.__class__
                                ))

        visited_states.add(instance_state)

        yield c, instance_mapper, instance_state, instance_dict
def _add_reverse_property(self, key):
    """Link this relationship and the relationship named ``key`` on
    the target mapper as mutual back-references, validating that
    they reference each other and have opposite directions."""
    other = self.mapper.get_property(key, _compile_mappers=False)
    self._reverse_property.add(other)
    other._reverse_property.add(self)

    if not other.mapper.common_parent(self.parent):
        raise sa_exc.ArgumentError('reverse_property %r on '
                'relationship %s references relationship %s, which '
                'does not reference mapper %s' % (key, self, other,
                self.parent))
    # two one-to-many (or two many-to-one) sides can't back-reference
    # each other; one side must be the "many" side.
    if self.direction in (ONETOMANY, MANYTOONE) and self.direction \
            == other.direction:
        raise sa_exc.ArgumentError('%s and back-reference %s are '
                'both of the same direction %r. Did you mean to '
                'set remote_side on the many-to-one side ?'
                % (other, self, self.direction))
@util.memoized_property
def mapper(self):
    """Return the targeted :class:`.Mapper` for this
    :class:`.RelationshipProperty`.

    This is a lazy-initializing static attribute.  The ``argument``
    given to the constructor may be a class, a :class:`.Mapper`, or
    a callable returning either.
    """
    if isinstance(self.argument, type):
        mapper_ = mapper.class_mapper(self.argument,
                compile=False)
    elif isinstance(self.argument, mapper.Mapper):
        mapper_ = self.argument
    elif util.callable(self.argument):
        # accept a callable to suit various deferred-
        # configurational schemes
        mapper_ = mapper.class_mapper(self.argument(),
                compile=False)
    else:
        raise sa_exc.ArgumentError("relationship '%s' expects "
                "a class or a mapper argument (received: %s)"
                % (self.key, type(self.argument)))
    assert isinstance(mapper_, mapper.Mapper), mapper_
    return mapper_
@util.memoized_property
@util.deprecated("0.7", "Use .target")
def table(self):
    """Return the selectable linked to this
    :class:`.RelationshipProperty` object's target
    :class:`.Mapper`.

    Deprecated alias for :attr:`.target`.
    """
    return self.target
def do_init(self):
    """Perform deferred initialization at mapper-configuration time.

    The steps run in dependency order; each later step consumes
    attributes established by the earlier ones.
    """
    self._check_conflicts()
    self._process_dependent_arguments()
    self._determine_joins()
    self._determine_synchronize_pairs()
    self._determine_direction()
    self._determine_local_remote_pairs()
    self._post_init()
    self._generate_backref()
    super(RelationshipProperty, self).do_init()
def _check_conflicts(self):
    """Test that this relationship is legal, warn about
    inheritance conflicts."""
    # a non-primary mapper may only add relationships that already
    # exist on the primary mapper for this class
    if not self.is_primary() \
        and not mapper.class_mapper(
                            self.parent.class_,
                            compile=False).has_property(self.key):
        raise sa_exc.ArgumentError("Attempting to assign a new "
                "relationship '%s' to a non-primary mapper on "
                "class '%s'. New relationships can only be added "
                "to the primary mapper, i.e. the very first mapper "
                "created for class '%s' " % (self.key,
                self.parent.class_.__name__,
                self.parent.class_.__name__))

    # check for conflicting relationship() on superclass
    if not self.parent.concrete:
        for inheriting in self.parent.iterate_to_root():
            if inheriting is not self.parent \
                    and inheriting.has_property(self.key):
                util.warn("Warning: relationship '%s' on mapper "
                        "'%s' supersedes the same relationship "
                        "on inherited mapper '%s'; this can "
                        "cause dependency issues during flush"
                        % (self.key, self.parent, inheriting))
def _process_dependent_arguments(self):
    """Convert incoming configuration arguments to their
    proper form.

    Callables are resolved, ORM annotations removed.
    """
    # accept callables for other attributes which may require
    # deferred initialization. This technique is used
    # by declarative "string configs" and some recipes.
    for attr in (
        'order_by',
        'primaryjoin',
        'secondaryjoin',
        'secondary',
        '_user_defined_foreign_keys',
        'remote_side',
        ):
        attr_value = getattr(self, attr)
        if util.callable(attr_value):
            setattr(self, attr, attr_value())

    # remove "annotations" which are present if mapped class
    # descriptors are used to create the join expression.
    for attr in 'primaryjoin', 'secondaryjoin':
        val = getattr(self, attr)
        if val is not None:
            setattr(self, attr, _orm_deannotate(
                expression._only_column_elements(val, attr))
            )

    # ensure expressions in self.order_by, foreign_keys,
    # remote_side are all columns, not strings.
    if self.order_by is not False and self.order_by is not None:
        self.order_by = [
                expression._only_column_elements(x, "order_by")
                for x in
                util.to_list(self.order_by)]

    self._user_defined_foreign_keys = \
        util.column_set(
                expression._only_column_elements(x, "foreign_keys")
                for x in util.to_column_set(
                    self._user_defined_foreign_keys
                ))

    self.remote_side = \
        util.column_set(
                expression._only_column_elements(x, "remote_side")
                for x in
                util.to_column_set(self.remote_side))

    self.target = self.mapper.mapped_table

    if self.cascade.delete_orphan:
        # register this class/key with the delete-orphan
        # bookkeeping on the primary mapper
        self.mapper.primary_mapper().delete_orphans.append(
                        (self.key, self.parent.class_)
                        )
def _determine_joins(self):
    """Determine the 'primaryjoin' and 'secondaryjoin' attributes,
    if not passed to the constructor already.

    This is based on analysis of the foreign key relationships
    between the parent and target mapped selectables.
    """
    if self.secondaryjoin is not None and self.secondary is None:
        raise sa_exc.ArgumentError("Property '" + self.key
                + "' specified with secondary join condition but "
                "no secondary argument")
    # if join conditions were not specified, figure them out based
    # on foreign keys

    def _search_for_join(mapper, table):
        # find a join between the given mapper's mapped table and
        # the given table. will try the mapper's local table first
        # for more specificity, then if not found will try the more
        # general mapped table, which in the case of inheritance is
        # a join.
        return join_condition(mapper.mapped_table, table,
                                        a_subset=mapper.local_table)

    try:
        if self.secondary is not None:
            if self.secondaryjoin is None:
                self.secondaryjoin = _search_for_join(self.mapper,
                                                    self.secondary)
            if self.primaryjoin is None:
                self.primaryjoin = _search_for_join(self.parent,
                                                    self.secondary)
        else:
            if self.primaryjoin is None:
                self.primaryjoin = _search_for_join(self.parent,
                                                    self.target)
    # NOTE: Python 2 except syntax; 'e' is bound but unused.
    except sa_exc.ArgumentError, e:
        raise sa_exc.ArgumentError("Could not determine join "
                "condition between parent/child tables on "
                "relationship %s. Specify a 'primaryjoin' "
                "expression. If 'secondary' is present, "
                "'secondaryjoin' is needed as well."
                % self)
def _columns_are_mapped(self, *cols):
    """Return True when every given column belongs to the secondary
    table (if one is configured), the parent's mapped table, or the
    target table."""
    secondary = self.secondary
    parent_cols = self.parent.mapped_table.c
    target_cols = self.target.c
    for col in cols:
        if secondary is not None and \
                secondary.c.contains_column(col):
            continue
        if parent_cols.contains_column(col) or \
                target_cols.contains_column(col):
            continue
        return False
    return True
def _sync_pairs_from_join(self, join_condition, primary):
    """Determine a list of "source"/"destination" column pairs
    based on the given join condition, as well as the
    foreign keys argument.

    "source" would be a column referenced by a foreign key,
    and "destination" would be the column who has a foreign key
    reference to "source".

    ``primary`` is True when analyzing the primaryjoin, False for
    the secondaryjoin; it only affects error messages.
    """
    fks = self._user_defined_foreign_keys
    # locate pairs
    eq_pairs = criterion_as_pairs(join_condition,
            consider_as_foreign_keys=fks,
            any_operator=self.viewonly)

    # couldn't find any fks, but we have
    # "secondary" - assume the "secondary" columns
    # are the fks
    if not eq_pairs and \
            self.secondary is not None and \
            not fks:
        fks = set(self.secondary.c)
        eq_pairs = criterion_as_pairs(join_condition,
                consider_as_foreign_keys=fks,
                any_operator=self.viewonly)

        if eq_pairs:
            util.warn("No ForeignKey objects were present "
                        "in secondary table '%s'. Assumed referenced "
                        "foreign key columns %s for join condition '%s' "
                        "on relationship %s" % (
                        self.secondary.description,
                        ", ".join(sorted(["'%s'" % col for col in fks])),
                        join_condition,
                        self
                    ))

    # Filter out just to columns that are mapped.
    # If viewonly, allow pairs where the FK col
    # was part of "foreign keys" - the column it references
    # may be in an un-mapped table - see
    # test.orm.test_relationships.ViewOnlyComplexJoin.test_basic
    # for an example of this.
    eq_pairs = [(l, r) for (l, r) in eq_pairs
                if self._columns_are_mapped(l, r)
                or self.viewonly and
                r in fks]

    if eq_pairs:
        return eq_pairs

    # from here below is just determining the best error message
    # to report. Check for a join condition using any operator
    # (not just ==), perhaps they need to turn on "viewonly=True".
    if not self.viewonly and criterion_as_pairs(join_condition,
            consider_as_foreign_keys=self._user_defined_foreign_keys,
            any_operator=True):
        err = "Could not locate any "\
            "foreign-key-equated, locally mapped column "\
            "pairs for %s "\
            "condition '%s' on relationship %s." % (
                primary and 'primaryjoin' or 'secondaryjoin',
                join_condition,
                self
            )
        if not self._user_defined_foreign_keys:
            err += " Ensure that the "\
                    "referencing Column objects have a "\
                    "ForeignKey present, or are otherwise part "\
                    "of a ForeignKeyConstraint on their parent "\
                    "Table, or specify the foreign_keys parameter "\
                    "to this relationship."
        err += " For more "\
                "relaxed rules on join conditions, the "\
                "relationship may be marked as viewonly=True."
        raise sa_exc.ArgumentError(err)
    else:
        if self._user_defined_foreign_keys:
            raise sa_exc.ArgumentError("Could not determine "
                    "relationship direction for %s condition "
                    "'%s', on relationship %s, using manual "
                    "'foreign_keys' setting. Do the columns "
                    "in 'foreign_keys' represent all, and "
                    "only, the 'foreign' columns in this join "
                    "condition? Does the %s Table already "
                    "have adequate ForeignKey and/or "
                    "ForeignKeyConstraint objects established "
                    "(in which case 'foreign_keys' is usually "
                    "unnecessary)?"
                    % (
                        primary and 'primaryjoin' or 'secondaryjoin',
                        join_condition,
                        self,
                        primary and 'mapped' or 'secondary'
                    ))
        else:
            raise sa_exc.ArgumentError("Could not determine "
                    "relationship direction for %s condition "
                    "'%s', on relationship %s. Ensure that the "
                    "referencing Column objects have a "
                    "ForeignKey present, or are otherwise part "
                    "of a ForeignKeyConstraint on their parent "
                    "Table, or specify the foreign_keys parameter "
                    "to this relationship."
                    % (
                        primary and 'primaryjoin' or 'secondaryjoin',
                        join_condition,
                        self
                    ))
def _determine_synchronize_pairs(self):
    """Resolve 'primary'/foreign' column pairs from the primaryjoin
    and secondaryjoin arguments.

    Populates ``synchronize_pairs``, ``secondary_synchronize_pairs``
    and ``_calculated_foreign_keys``.
    """
    if self.local_remote_pairs:
        # explicit _local_remote_pairs: derive sync pairs from the
        # user-supplied foreign_keys collection
        if not self._user_defined_foreign_keys:
            raise sa_exc.ArgumentError(
                    "foreign_keys argument is "
                    "required with _local_remote_pairs argument")
        self.synchronize_pairs = []
        for l, r in self.local_remote_pairs:
            if r in self._user_defined_foreign_keys:
                self.synchronize_pairs.append((l, r))
            elif l in self._user_defined_foreign_keys:
                self.synchronize_pairs.append((r, l))
    else:
        self.synchronize_pairs = self._sync_pairs_from_join(
                                        self.primaryjoin,
                                        True)

    # the "foreign" side of each sync pair
    self._calculated_foreign_keys = util.column_set(
                            r for (l, r) in
                            self.synchronize_pairs)

    if self.secondaryjoin is not None:
        self.secondary_synchronize_pairs = self._sync_pairs_from_join(
                                                    self.secondaryjoin,
                                                    False)
        self._calculated_foreign_keys.update(
                            r for (l, r) in
                            self.secondary_synchronize_pairs)
    else:
        self.secondary_synchronize_pairs = None
def _determine_direction(self):
    """Determine if this relationship is one to many, many to one,
    many to many.

    This is derived from the primaryjoin, presence of "secondary",
    and in the case of self-referential the "remote side".
    """
    if self.secondaryjoin is not None:
        self.direction = MANYTOMANY
    elif self._refers_to_parent_table():
        # self referential defaults to ONETOMANY unless the "remote"
        # side is present and does not reference any foreign key
        # columns
        if self.local_remote_pairs:
            remote = [r for (l, r) in self.local_remote_pairs]
        elif self.remote_side:
            remote = self.remote_side
        else:
            remote = None

        if not remote or self._calculated_foreign_keys.difference(l for (l,
                r) in self.synchronize_pairs).intersection(remote):
            self.direction = ONETOMANY
        else:
            self.direction = MANYTOONE
    else:
        parentcols = util.column_set(self.parent.mapped_table.c)
        targetcols = util.column_set(self.mapper.mapped_table.c)

        # fk collection which suggests ONETOMANY.
        onetomany_fk = targetcols.intersection(
                        self._calculated_foreign_keys)

        # fk collection which suggests MANYTOONE.
        manytoone_fk = parentcols.intersection(
                        self._calculated_foreign_keys)

        if onetomany_fk and manytoone_fk:
            # fks on both sides. do the same test only based on the
            # local side.
            referents = [c for (c, f) in self.synchronize_pairs]
            onetomany_local = parentcols.intersection(referents)
            manytoone_local = targetcols.intersection(referents)

            if onetomany_local and not manytoone_local:
                self.direction = ONETOMANY
            elif manytoone_local and not onetomany_local:
                self.direction = MANYTOONE
            else:
                raise sa_exc.ArgumentError(
                        "Can't determine relationship"
                        " direction for relationship '%s' - foreign "
                        "key columns are present in both the parent "
                        "and the child's mapped tables. Specify "
                        "'foreign_keys' argument." % self)
        elif onetomany_fk:
            self.direction = ONETOMANY
        elif manytoone_fk:
            self.direction = MANYTOONE
        else:
            raise sa_exc.ArgumentError("Can't determine relationship "
                    "direction for relationship '%s' - foreign "
                    "key columns are present in neither the parent "
                    "nor the child's mapped tables" % self)

    # delete-orphan requires an unambiguous single parent; warn when
    # it is configured on a direction that can't guarantee one
    if self.cascade.delete_orphan and not self.single_parent \
        and (self.direction is MANYTOMANY or self.direction
             is MANYTOONE):
        util.warn('On %s, delete-orphan cascade is not supported '
                    'on a many-to-many or many-to-one relationship '
                    'when single_parent is not set. Set '
                    'single_parent=True on the relationship().'
                    % self)

    if self.direction is MANYTOONE and self.passive_deletes:
        util.warn("On %s, 'passive_deletes' is normally configured "
                    "on one-to-many, one-to-one, many-to-many "
                    "relationships only."
                    % self)
def _determine_local_remote_pairs(self):
    """Determine pairs of columns representing "local" to
    "remote", where "local" columns are on the parent mapper,
    "remote" are on the target mapper.

    These pairs are used on the load side only to generate
    lazy loading clauses.
    """
    if not self.local_remote_pairs and not self.remote_side:
        # the most common, trivial case. Derive
        # local/remote pairs from the synchronize pairs.
        eq_pairs = util.unique_list(
                        self.synchronize_pairs +
                        (self.secondary_synchronize_pairs or []))
        if self.direction is MANYTOONE:
            self.local_remote_pairs = [(r, l) for l, r in eq_pairs]
        else:
            self.local_remote_pairs = eq_pairs

    # "remote_side" specified, derive from the primaryjoin
    # plus remote_side, similarly to how synchronize_pairs
    # were determined.
    elif self.remote_side:
        if self.local_remote_pairs:
            raise sa_exc.ArgumentError('remote_side argument is '
                'redundant against more detailed '
                '_local_remote_side argument.')
        if self.direction is MANYTOONE:
            self.local_remote_pairs = [(r, l) for (l, r) in
                    criterion_as_pairs(self.primaryjoin,
                        consider_as_referenced_keys=self.remote_side,
                        any_operator=True)]
        else:
            self.local_remote_pairs = \
                criterion_as_pairs(self.primaryjoin,
                    consider_as_foreign_keys=self.remote_side,
                    any_operator=True)
        if not self.local_remote_pairs:
            raise sa_exc.ArgumentError('Relationship %s could '
                    'not determine any local/remote column '
                    'pairs from remote side argument %r'
                    % (self, self.remote_side))
    # else local_remote_pairs were sent explicitly via
    # ._local_remote_pairs.

    # create local_side/remote_side accessors
    self.local_side = util.ordered_column_set(
                        l for l, r in self.local_remote_pairs)
    self.remote_side = util.ordered_column_set(
                        r for l, r in self.local_remote_pairs)

    # check that the non-foreign key column in the local/remote
    # collection is mapped. The foreign key
    # which the individual mapped column references directly may
    # itself be in a non-mapped table; see
    # test.orm.test_relationships.ViewOnlyComplexJoin.test_basic
    # for an example of this.
    if self.direction is ONETOMANY:
        for col in self.local_side:
            if not self._columns_are_mapped(col):
                raise sa_exc.ArgumentError(
                        "Local column '%s' is not "
                        "part of mapping %s. Specify remote_side "
                        "argument to indicate which column lazy join "
                        "condition should compare against." % (col,
                        self.parent))
    elif self.direction is MANYTOONE:
        for col in self.remote_side:
            if not self._columns_are_mapped(col):
                raise sa_exc.ArgumentError(
                        "Remote column '%s' is not "
                        "part of mapping %s. Specify remote_side "
                        "argument to indicate which column lazy join "
                        "condition should bind." % (col, self.mapper))
def _generate_backref(self):
    """Interpret the 'backref' instruction to create a new
    RelationshipProperty on the target mapper, then link the
    two via back_populates."""
    if not self.is_primary():
        return
    if self.backref is not None and not self.back_populates:
        # backref may be a plain key string or a (key, kwargs) pair
        # produced by backref()
        if isinstance(self.backref, basestring):
            backref_key, kwargs = self.backref, {}
        else:
            backref_key, kwargs = self.backref
        # NOTE: shadows the module-level 'mapper' import for the
        # remainder of this block
        mapper = self.mapper.primary_mapper()
        if mapper.has_property(backref_key):
            raise sa_exc.ArgumentError("Error creating backref "
                    "'%s' on relationship '%s': property of that "
                    "name exists on mapper '%s'" % (backref_key,
                    self, mapper))
        if self.secondary is not None:
            # many-to-many: the backref reverses the primary and
            # secondary join conditions
            pj = kwargs.pop('primaryjoin', self.secondaryjoin)
            sj = kwargs.pop('secondaryjoin', self.primaryjoin)
        else:
            pj = kwargs.pop('primaryjoin', self.primaryjoin)
            sj = kwargs.pop('secondaryjoin', None)
            if sj:
                raise sa_exc.InvalidRequestError(
                    "Can't assign 'secondaryjoin' on a backref against "
                    "a non-secondary relationship."
                        )
        foreign_keys = kwargs.pop('foreign_keys',
                self._user_defined_foreign_keys)
        parent = self.parent.primary_mapper()
        kwargs.setdefault('viewonly', self.viewonly)
        kwargs.setdefault('post_update', self.post_update)
        kwargs.setdefault('passive_updates', self.passive_updates)
        self.back_populates = backref_key
        relationship = RelationshipProperty(
                        parent,
                        self.secondary,
                        pj,
                        sj,
                        foreign_keys=foreign_keys,
                        back_populates=self.key,
                        **kwargs
                        )
        mapper._configure_property(backref_key, relationship)
    if self.back_populates:
        self._add_reverse_property(self.back_populates)
def _post_init(self):
    """Log the fully-determined join/pair configuration and set up
    direction-dependent defaults (uselist, dependency processor)."""
    self.logger.info('%s setup primary join %s', self,
                    self.primaryjoin)
    self.logger.info('%s setup secondary join %s', self,
                    self.secondaryjoin)
    self.logger.info('%s synchronize pairs [%s]', self,
                    ','.join('(%s => %s)' % (l, r) for (l, r) in
                    self.synchronize_pairs))
    self.logger.info('%s secondary synchronize pairs [%s]', self,
                    ','.join('(%s => %s)' % (l, r) for (l, r) in
                    self.secondary_synchronize_pairs or []))
    self.logger.info('%s local/remote pairs [%s]', self,
                    ','.join('(%s / %s)' % (l, r) for (l, r) in
                    self.local_remote_pairs))
    self.logger.info('%s relationship direction %s', self,
                    self.direction)
    if self.uselist is None:
        # scalar for many-to-one, collection otherwise
        self.uselist = self.direction is not MANYTOONE
    if not self.viewonly:
        # viewonly relationships persist nothing, so need no
        # flush-time dependency processor
        self._dependency_processor = \
            dependency.DependencyProcessor.from_relationship(self)
@util.memoized_property
def _use_get(self):
    """Memoize the 'use_get' attribute of this RelationshipLoader's
    lazyloader, i.e. whether a lazy load can be satisfied by an
    identity-map get."""
    strategy = self._get_strategy(strategies.LazyLoader)
    return strategy.use_get
def _refers_to_parent_table(self):
    """Return True if this relationship joins the parent table to
    itself, i.e. some synchronize pair has both of its columns in
    tables from which both the parent and target selectables
    derive."""
    parent_table = self.parent.mapped_table
    target_table = self.mapper.mapped_table
    return any(
        parent_table.is_derived_from(col.table) and
        parent_table.is_derived_from(fk.table) and
        target_table.is_derived_from(col.table) and
        target_table.is_derived_from(fk.table)
        for col, fk in self.synchronize_pairs
    )
@util.memoized_property
def _is_self_referential(self):
    # True when parent and target mappers belong to the same class
    # hierarchy, i.e. the relationship joins a class to itself.
    return self.mapper.common_parent(self.parent)
def per_property_preprocessors(self, uow):
    """Register this relationship's flush-time dependency processor
    with the given unit of work.

    Viewonly relationships persist nothing and register none.
    """
    if self.viewonly:
        return
    processor = self._dependency_processor
    if processor:
        processor.per_property_preprocessors(uow)
def _create_joins(self, source_polymorphic=False,
                    source_selectable=None, dest_polymorphic=False,
                    dest_selectable=None, of_type=None):
    """Produce the join clauses and selectables used to render a
    join along this relationship, applying aliasing as needed for
    polymorphic selectables, self-referential joins, and explicit
    source/destination selectables.

    Returns a tuple ``(primaryjoin, secondaryjoin,
    source_selectable, dest_selectable, secondary,
    target_adapter)``.
    """
    if source_selectable is None:
        if source_polymorphic and self.parent.with_polymorphic:
            source_selectable = self.parent._with_polymorphic_selectable

    aliased = False
    if dest_selectable is None:
        if dest_polymorphic and self.mapper.with_polymorphic:
            dest_selectable = self.mapper._with_polymorphic_selectable
            aliased = True
        else:
            dest_selectable = self.mapper.mapped_table

        # a self-referential join with no explicit source needs the
        # destination aliased so the two sides are distinguishable
        if self._is_self_referential and source_selectable is None:
            dest_selectable = dest_selectable.alias()
            aliased = True
    else:
        aliased = True

    # place a barrier on the destination such that
    # replacement traversals won't ever dig into it.
    # its internal structure remains fixed
    # regardless of context.
    dest_selectable = _shallow_annotate(
                            dest_selectable,
                            {'no_replacement_traverse':True})

    aliased = aliased or (source_selectable is not None)

    primaryjoin, secondaryjoin, secondary = self.primaryjoin, \
        self.secondaryjoin, self.secondary

    # adjust the join condition for single table inheritance,
    # in the case that the join is to a subclass
    # this is analogous to the "_adjust_for_single_table_inheritance()"
    # method in Query.

    dest_mapper = of_type or self.mapper

    single_crit = dest_mapper._single_table_criterion
    if single_crit is not None:
        if secondaryjoin is not None:
            secondaryjoin = secondaryjoin & single_crit
        else:
            primaryjoin = primaryjoin & single_crit

    if aliased:
        if secondary is not None:
            # many-to-many: alias the association table and adapt
            # both join conditions against the aliases
            secondary = secondary.alias()
            primary_aliasizer = ClauseAdapter(secondary)
            secondary_aliasizer = \
                ClauseAdapter(dest_selectable,
                    equivalents=self.mapper._equivalent_columns).\
                    chain(primary_aliasizer)
            if source_selectable is not None:
                primary_aliasizer = \
                    ClauseAdapter(secondary).\
                    chain(ClauseAdapter(source_selectable,
                    equivalents=self.parent._equivalent_columns))
            secondaryjoin = \
                secondary_aliasizer.traverse(secondaryjoin)
        else:
            primary_aliasizer = ClauseAdapter(dest_selectable,
                    exclude=self.local_side,
                    equivalents=self.mapper._equivalent_columns)
            if source_selectable is not None:
                primary_aliasizer.chain(
                    ClauseAdapter(source_selectable,
                        exclude=self.remote_side,
                        equivalents=self.parent._equivalent_columns))
            secondary_aliasizer = None
        primaryjoin = primary_aliasizer.traverse(primaryjoin)
        target_adapter = secondary_aliasizer or primary_aliasizer
        # the returned adapter is reused elsewhere; clear its
        # include/exclude filters
        target_adapter.include = target_adapter.exclude = None
    else:
        target_adapter = None
    if source_selectable is None:
        source_selectable = self.parent.local_table
    if dest_selectable is None:
        dest_selectable = self.mapper.local_table
    return (
        primaryjoin,
        secondaryjoin,
        source_selectable,
        dest_selectable,
        secondary,
        target_adapter,
        )
# Legacy names: relation() predates relationship(); keep the old
# property class names importable for backwards compatibility.
PropertyLoader = RelationProperty = RelationshipProperty

log.class_logger(RelationshipProperty)
| 41.061941 | 84 | 0.554671 |
"""MapperProperty implementations.
This is a private module which defines the behavior of invidual ORM-
mapped attributes.
"""
from sqlalchemy import sql, util, log, exc as sa_exc
from sqlalchemy.sql.util import ClauseAdapter, criterion_as_pairs, \
join_condition, _shallow_annotate
from sqlalchemy.sql import operators, expression
from sqlalchemy.orm import attributes, dependency, mapper, \
object_mapper, strategies, configure_mappers
from sqlalchemy.orm.util import CascadeOptions, _class_to_mapper, \
_orm_annotate, _orm_deannotate
from sqlalchemy.orm.interfaces import MANYTOMANY, MANYTOONE, \
MapperProperty, ONETOMANY, PropComparator, StrategizedProperty
mapperlib = util.importlater("sqlalchemy.orm", "mapperlib")
NoneType = type(None)
__all__ = ('ColumnProperty', 'CompositeProperty', 'SynonymProperty',
'ComparableProperty', 'RelationshipProperty', 'RelationProperty')
from descriptor_props import CompositeProperty, SynonymProperty, \
ComparableProperty,ConcreteInheritedProperty
class ColumnProperty(StrategizedProperty):
    """Describes an object attribute that corresponds to a table column.

    Public constructor is the :func:`.orm.column_property` function.
    """

    def __init__(self, *columns, **kwargs):
        """Construct a ColumnProperty.

        Note the public constructor is the :func:`.orm.column_property`
        function.

        :param \*columns: The list of `columns` describes a single
          object property. If there are multiple tables joined
          together for the mapper, this list represents the equivalent
          column as it appears across each table.

        :param group:
        :param deferred:
        :param comparator_factory:
        :param descriptor:
        :param expire_on_flush:
        :param extension:
        """
        self._orig_columns = [expression._labeled(c) for c in columns]
        # de-annotated copies are what get rendered in SQL
        self.columns = [expression._labeled(_orm_deannotate(c))
                for c in columns]
        self.group = kwargs.pop('group', None)
        self.deferred = kwargs.pop('deferred', False)
        self.instrument = kwargs.pop('_instrument', True)
        self.comparator_factory = kwargs.pop('comparator_factory',
                self.__class__.Comparator)
        self.descriptor = kwargs.pop('descriptor', None)
        self.extension = kwargs.pop('extension', None)
        self.active_history = kwargs.pop('active_history', False)
        self.expire_on_flush = kwargs.pop('expire_on_flush', True)
        if 'doc' in kwargs:
            self.doc = kwargs.pop('doc')
        else:
            # fall back to the last column that carries a .doc
            for col in reversed(self.columns):
                doc = getattr(col, 'doc', None)
                if doc is not None:
                    self.doc = doc
                    break
            else:
                self.doc = None
        if kwargs:
            raise TypeError(
                "%s received unexpected keyword argument(s): %s" % (
                    self.__class__.__name__,
                    ', '.join(sorted(kwargs.keys()))))
        util.set_creation_order(self)
        # choose the loader strategy based on configuration
        if not self.instrument:
            self.strategy_class = strategies.UninstrumentedColumnLoader
        elif self.deferred:
            self.strategy_class = strategies.DeferredColumnLoader
        else:
            self.strategy_class = strategies.ColumnLoader

    def instrument_class(self, mapper):
        """Install an instrumented descriptor for this property on
        the mapped class."""
        if not self.instrument:
            return
        attributes.register_descriptor(
            mapper.class_,
            self.key,
            comparator=self.comparator_factory(self, mapper),
            parententity=mapper,
            doc=self.doc
            )

    def do_init(self):
        super(ColumnProperty, self).do_init()
        # warn when several distinct primary key columns collapse
        # into a single attribute name
        if len(self.columns) > 1 and \
                set(self.parent.primary_key).issuperset(self.columns):
            util.warn(
                ("On mapper %s, primary key column '%s' is being combined "
                "with distinct primary key column '%s' in attribute '%s'. "
                "Use explicit properties to give each column its own mapped "
                "attribute name.") % (self.parent, self.columns[1],
                self.columns[0], self.key))

    def copy(self):
        """Return a new ColumnProperty with the same configuration."""
        return ColumnProperty(
                    deferred=self.deferred,
                    group=self.group,
                    active_history=self.active_history,
                    *self.columns)

    def _getcommitted(self, state, dict_, column,
                    passive=attributes.PASSIVE_OFF):
        # value as of the last flush, ignoring pending changes
        return state.get_impl(self.key).\
                    get_committed_value(state, dict_, passive=passive)

    def merge(self, session, source_state, source_dict, dest_state,
                    dest_dict, load, _recursive):
        """Merge the value of this column attribute from source to
        dest as part of :meth:`.Session.merge`."""
        if not self.instrument:
            return
        elif self.key in source_dict:
            value = source_dict[self.key]

            if not load:
                dest_dict[self.key] = value
            else:
                impl = dest_state.get_impl(self.key)
                impl.set(dest_state, dest_dict, value, None)
        elif dest_state.has_identity and self.key not in dest_dict:
            # source doesn't have the value loaded; expire it on the
            # destination so it reloads on next access
            dest_state.expire_attributes(dest_dict, [self.key])

    class Comparator(PropComparator):
        """Produce comparison operations for column-based attributes."""

        @util.memoized_instancemethod
        def __clause_element__(self):
            if self.adapter:
                return self.adapter(self.prop.columns[0])
            else:
                # annotate with the originating entity for later
                # ORM-level adaption
                return self.prop.columns[0]._annotate({
                    "parententity": self.mapper,
                    "parentmapper":self.mapper})

        def operate(self, op, *other, **kwargs):
            return op(self.__clause_element__(), *other, **kwargs)

        def reverse_operate(self, op, other, **kwargs):
            col = self.__clause_element__()
            return op(col._bind_param(op, other), col, **kwargs)

    # legacy alias
    ColumnComparator = Comparator

    def __str__(self):
        return str(self.parent.class_.__name__) + "." + self.key

log.class_logger(ColumnProperty)
class RelationshipProperty(StrategizedProperty):
"""Describes an object property that holds a single item or list
of items that correspond to a related database table.
Public constructor is the :func:`.orm.relationship` function.
Of note here is the :class:`.RelationshipProperty.Comparator`
class, which implements comparison operations for scalar-
and collection-referencing mapped attributes.
"""
strategy_wildcard_key = 'relationship:*'
def __init__(self, argument,
    secondary=None, primaryjoin=None,
    secondaryjoin=None,
    foreign_keys=None,
    uselist=None,
    order_by=False,
    backref=None,
    back_populates=None,
    post_update=False,
    cascade=False, extension=None,
    viewonly=False, lazy=True,
    collection_class=None, passive_deletes=False,
    passive_updates=True, remote_side=None,
    enable_typechecks=True, join_depth=None,
    comparator_factory=None,
    single_parent=False, innerjoin=False,
    doc=None,
    active_history=False,
    cascade_backrefs=True,
    load_on_pending=False,
    strategy_class=None, _local_remote_pairs=None,
    query_class=None):
    """Construct a RelationshipProperty.

    Note the public constructor is the :func:`.orm.relationship`
    function; see that function's documentation for the meaning of
    each argument.
    """
    self.uselist = uselist
    self.argument = argument
    self.secondary = secondary
    self.primaryjoin = primaryjoin
    self.secondaryjoin = secondaryjoin
    self.post_update = post_update
    # direction is determined later during mapper configuration
    self.direction = None
    self.viewonly = viewonly
    self.lazy = lazy
    self.single_parent = single_parent
    self._user_defined_foreign_keys = foreign_keys
    self.collection_class = collection_class
    self.passive_deletes = passive_deletes
    self.cascade_backrefs = cascade_backrefs
    self.passive_updates = passive_updates
    self.remote_side = remote_side
    self.enable_typechecks = enable_typechecks
    self.query_class = query_class
    self.innerjoin = innerjoin
    self.doc = doc
    self.active_history = active_history
    self.join_depth = join_depth
    self.local_remote_pairs = _local_remote_pairs
    self.extension = extension
    self.load_on_pending = load_on_pending
    self.comparator_factory = comparator_factory or \
                                RelationshipProperty.Comparator
    self.comparator = self.comparator_factory(self, None)
    util.set_creation_order(self)

    # choose the loader strategy; 'dynamic' gets its dedicated loader
    if strategy_class:
        self.strategy_class = strategy_class
    elif self.lazy == 'dynamic':
        from sqlalchemy.orm import dynamic
        self.strategy_class = dynamic.DynaLoader
    else:
        self.strategy_class = strategies.factory(self.lazy)
    self._reverse_property = set()

    if cascade is not False:
        self.cascade = CascadeOptions(cascade)
    else:
        self.cascade = CascadeOptions("save-update, merge")

    if self.passive_deletes == 'all' and \
                ("delete" in self.cascade or
                "delete-orphan" in self.cascade):
        raise sa_exc.ArgumentError(
                "Can't set passive_deletes='all' in conjunction "
                "with 'delete' or 'delete-orphan' cascade")
    self.order_by = order_by
    self.back_populates = back_populates
    if self.back_populates:
        # backref and back_populates are mutually exclusive ways of
        # configuring the reverse side
        if backref:
            raise sa_exc.ArgumentError(
                    "backref and back_populates keyword arguments "
                    "are mutually exclusive")
        self.backref = None
    else:
        self.backref = backref
    def instrument_class(self, mapper):
        # Install the class-level descriptor for this relationship on the
        # mapped class, using a Comparator bound to the given mapper so
        # that expressions like MyClass.rel == obj render SQL.
        attributes.register_descriptor(
            mapper.class_,
            self.key,
            comparator=self.comparator_factory(self, mapper),
            parententity=mapper,
            doc=self.doc,
            )
    class Comparator(PropComparator):
        """Produce comparison operations for :func:`~.orm.relationship`-based
        attributes."""
        def __init__(self, prop, mapper, of_type=None, adapter=None):
            """Construction of :class:`.RelationshipProperty.Comparator`
            is internal to the ORM's attribute mechanics.
            """
            self.prop = prop
            self.mapper = mapper
            self.adapter = adapter
            # _of_type is set only when an of_type() restriction is in
            # play; elsewhere it is probed via getattr(..., None).
            if of_type:
                self._of_type = _class_to_mapper(of_type)
        def adapted(self, adapter):
            """Return a copy of this PropComparator which will use the
            given adaption function on the local side of generated
            expressions.
            """
            return self.__class__(self.property, self.mapper,
                                getattr(self, '_of_type', None),
                                adapter)
        @property
        def parententity(self):
            # the Mapper managing the parent side of this relationship
            return self.property.parent
        def __clause_element__(self):
            # the selectable representing the parent entity, adapted if
            # an aliasing adapter is present (e.g. aliased() usage)
            elem = self.property.parent._with_polymorphic_selectable
            if self.adapter:
                return self.adapter(elem)
            else:
                return elem
        def of_type(self, cls):
            """Produce a construct that represents a particular 'subtype' of
            attribute for the parent class.
            Currently this is usable in conjunction with :meth:`.Query.join`
            and :meth:`.Query.outerjoin`.
            """
            return RelationshipProperty.Comparator(
                                        self.property,
                                        self.mapper,
                                        cls, adapter=self.adapter)
        def in_(self, other):
            """Produce an IN clause - this is not implemented
            for :func:`~.orm.relationship`-based attributes at this time.
            """
            raise NotImplementedError('in_() not yet supported for '
                    'relationships. For a simple many-to-one, use '
                    'in_() against the set of foreign key values.')
        # __eq__ produces SQL expressions rather than booleans, so the
        # comparator cannot be meaningfully hashed.
        __hash__ = None
        def __eq__(self, other):
            """Implement the ``==`` operator.
            In a many-to-one context, such as::
              MyClass.some_prop == <some object>
            this will typically produce a
            clause such as::
              mytable.related_id == <some id>
            Where ``<some id>`` is the primary key of the given
            object.
            The ``==`` operator provides partial functionality for non-
            many-to-one comparisons:
            * Comparisons against collections are not supported.
              Use :meth:`~.RelationshipProperty.Comparator.contains`.
            * Compared to a scalar one-to-many, will produce a
              clause that compares the target columns in the parent to
              the given target.
            * Compared to a scalar many-to-many, an alias
              of the association table will be rendered as
              well, forming a natural join that is part of the
              main body of the query. This will not work for
              queries that go beyond simple AND conjunctions of
              comparisons, such as those which use OR. Use
              explicit joins, outerjoins, or
              :meth:`~.RelationshipProperty.Comparator.has` for
              more comprehensive non-many-to-one scalar
              membership tests.
            * Comparisons against ``None`` given in a one-to-many
              or many-to-many context produce a NOT EXISTS clause.
            """
            if isinstance(other, (NoneType, expression._Null)):
                # "rel == None": collection-style directions render
                # NOT EXISTS; many-to-one compares FK columns to NULL.
                if self.property.direction in [ONETOMANY, MANYTOMANY]:
                    return ~self._criterion_exists()
                else:
                    return _orm_annotate(self.property._optimized_compare(
                            None, adapt_source=self.adapter))
            elif self.property.uselist:
                raise sa_exc.InvalidRequestError("Can't compare a colle"
                        "ction to an object or collection; use "
                        "contains() to test for membership.")
            else:
                return _orm_annotate(self.property._optimized_compare(other,
                        adapt_source=self.adapter))
        def _criterion_exists(self, criterion=None, **kwargs):
            """Produce an EXISTS subquery joining this relationship's
            join condition with the given (optional) criterion; kwargs
            are converted to equality criteria against target-class
            attributes."""
            if getattr(self, '_of_type', None):
                # of_type() restriction: target the subtype's selectable,
                # aliasing if self-referential, and fold in any
                # single-table-inheritance discriminator criterion.
                target_mapper = self._of_type
                to_selectable = target_mapper._with_polymorphic_selectable
                if self.property._is_self_referential:
                    to_selectable = to_selectable.alias()
                single_crit = target_mapper._single_table_criterion
                if single_crit is not None:
                    if criterion is not None:
                        criterion = single_crit & criterion
                    else:
                        criterion = single_crit
            else:
                to_selectable = None
            if self.adapter:
                source_selectable = self.__clause_element__()
            else:
                source_selectable = None
            pj, sj, source, dest, secondary, target_adapter = \
                self.property._create_joins(dest_polymorphic=True,
                        dest_selectable=to_selectable,
                        source_selectable=source_selectable)
            for k in kwargs:
                crit = getattr(self.property.mapper.class_, k) == kwargs[k]
                if criterion is None:
                    criterion = crit
                else:
                    criterion = criterion & crit
            # annotate the *local* side of the join condition, in the case
            # of pj + sj this is the full primaryjoin, in the case of just
            # pj its the local side of the primaryjoin.
            if sj is not None:
                j = _orm_annotate(pj) & sj
            else:
                j = _orm_annotate(pj, exclude=self.property.remote_side)
            if criterion is not None and target_adapter:
                # limit this adapter to annotated only?
                criterion = target_adapter.traverse(criterion)
            # only have the "joined left side" of what we
            # return be subject to Query adaption. The right
            # side of it is used for an exists() subquery and
            # should not correlate or otherwise reach out
            # to anything in the enclosing query.
            if criterion is not None:
                criterion = criterion._annotate({'no_replacement_traverse': True})
            crit = j & criterion
            return sql.exists([1], crit, from_obj=dest).\
                            correlate(source._annotate({'_orm_adapt':True}))
        def any(self, criterion=None, **kwargs):
            """Produce an expression that tests a collection against
            particular criterion, using EXISTS.
            An expression like::
                session.query(MyClass).filter(
                    MyClass.somereference.any(SomeRelated.x==2)
                )
            Will produce a query like::
                SELECT * FROM my_table WHERE
                EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id
                AND related.x=2)
            Because :meth:`~.RelationshipProperty.Comparator.any` uses
            a correlated subquery, its performance is not nearly as
            good when compared against large target tables as that of
            using a join.
            :meth:`~.RelationshipProperty.Comparator.any` is particularly
            useful for testing for empty collections::
                session.query(MyClass).filter(
                    ~MyClass.somereference.any()
                )
            will produce::
                SELECT * FROM my_table WHERE
                NOT EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id)
            :meth:`~.RelationshipProperty.Comparator.any` is only
            valid for collections, i.e. a :func:`.relationship`
            that has ``uselist=True``.  For scalar references,
            use :meth:`~.RelationshipProperty.Comparator.has`.
            """
            if not self.property.uselist:
                raise sa_exc.InvalidRequestError(
                            "'any()' not implemented for scalar "
                            "attributes. Use has()."
                        )
            return self._criterion_exists(criterion, **kwargs)
        def has(self, criterion=None, **kwargs):
            """Produce an expression that tests a scalar reference against
            particular criterion, using EXISTS.
            An expression like::
                session.query(MyClass).filter(
                    MyClass.somereference.has(SomeRelated.x==2)
                )
            Will produce a query like::
                SELECT * FROM my_table WHERE
                EXISTS (SELECT 1 FROM related WHERE related.id==my_table.related_id
                AND related.x=2)
            Because :meth:`~.RelationshipProperty.Comparator.has` uses
            a correlated subquery, its performance is not nearly as
            good when compared against large target tables as that of
            using a join.
            :meth:`~.RelationshipProperty.Comparator.has` is only
            valid for scalar references, i.e. a :func:`.relationship`
            that has ``uselist=False``.  For collection references,
            use :meth:`~.RelationshipProperty.Comparator.any`.
            """
            if self.property.uselist:
                raise sa_exc.InvalidRequestError(
                            "'has()' not implemented for collections. "
                            "Use any().")
            return self._criterion_exists(criterion, **kwargs)
        def contains(self, other, **kwargs):
            """Return a simple expression that tests a collection for
            containment of a particular item.
            :meth:`~.RelationshipProperty.Comparator.contains` is
            only valid for a collection, i.e. a
            :func:`~.orm.relationship` that implements
            one-to-many or many-to-many with ``uselist=True``.
            When used in a simple one-to-many context, an
            expression like::
                MyClass.contains(other)
            Produces a clause like::
                mytable.id == <some id>
            Where ``<some id>`` is the value of the foreign key
            attribute on ``other`` which refers to the primary
            key of its parent object. From this it follows that
            :meth:`~.RelationshipProperty.Comparator.contains` is
            very useful when used with simple one-to-many
            operations.
            For many-to-many operations, the behavior of
            :meth:`~.RelationshipProperty.Comparator.contains`
            has more caveats. The association table will be
            rendered in the statement, producing an "implicit"
            join, that is, includes multiple tables in the FROM
            clause which are equated in the WHERE clause::
                query(MyClass).filter(MyClass.contains(other))
            Produces a query like::
                SELECT * FROM my_table, my_association_table AS
                my_association_table_1 WHERE
                my_table.id = my_association_table_1.parent_id
                AND my_association_table_1.child_id = <some id>
            Where ``<some id>`` would be the primary key of
            ``other``. From the above, it is clear that
            :meth:`~.RelationshipProperty.Comparator.contains`
            will **not** work with many-to-many collections when
            used in queries that move beyond simple AND
            conjunctions, such as multiple
            :meth:`~.RelationshipProperty.Comparator.contains`
            expressions joined by OR. In such cases subqueries or
            explicit "outer joins" will need to be used instead.
            See :meth:`~.RelationshipProperty.Comparator.any` for
            a less-performant alternative using EXISTS, or refer
            to :meth:`.Query.outerjoin` as well as :ref:`ormtutorial_joins`
            for more details on constructing outer joins.
            """
            if not self.property.uselist:
                raise sa_exc.InvalidRequestError(
                            "'contains' not implemented for scalar "
                            "attributes.  Use ==")
            clause = self.property._optimized_compare(other,
                        adapt_source=self.adapter)
            if self.property.secondaryjoin is not None:
                # m2m: negation needs a hand-built clause since the
                # implicit-join form cannot simply be inverted.
                clause.negation_clause = \
                    self.__negated_contains_or_equals(other)
            return clause
        def __negated_contains_or_equals(self, other):
            # Build the negated form used by ~contains() / !=.
            if self.property.direction == MANYTOONE:
                state = attributes.instance_state(other)
                def state_bindparam(x, state, col):
                    o = state.obj() # strong ref
                    return sql.bindparam(x, unique=True, callable_=lambda : \
                        self.property.mapper._get_committed_attr_by_column(o,
                                col))
                def adapt(col):
                    if self.adapter:
                        return self.adapter(col)
                    else:
                        return col
                if self.property._use_get:
                    # simple FK->PK get: compare FK columns directly,
                    # treating NULL FKs as "not equal"
                    return sql.and_(*[
                        sql.or_(
                        adapt(x) != state_bindparam(adapt(x), state, y),
                        adapt(x) == None)
                        for (x, y) in self.property.local_remote_pairs])
            # general case: NOT EXISTS of a primary-key match against
            # the given instance
            criterion = sql.and_(*[x==y for (x, y) in
                                zip(
                                    self.property.mapper.primary_key,
                                    self.property.\
                                            mapper.\
                                            primary_key_from_instance(other))
                                    ])
            return ~self._criterion_exists(criterion)
        def __ne__(self, other):
            """Implement the ``!=`` operator.
            In a many-to-one context, such as::
              MyClass.some_prop != <some object>
            This will typically produce a clause such as::
              mytable.related_id != <some id>
            Where ``<some id>`` is the primary key of the
            given object.
            The ``!=`` operator provides partial functionality for non-
            many-to-one comparisons:
            * Comparisons against collections are not supported.
              Use
              :meth:`~.RelationshipProperty.Comparator.contains`
              in conjunction with :func:`~.expression.not_`.
            * Compared to a scalar one-to-many, will produce a
              clause that compares the target columns in the parent to
              the given target.
            * Compared to a scalar many-to-many, an alias
              of the association table will be rendered as
              well, forming a natural join that is part of the
              main body of the query. This will not work for
              queries that go beyond simple AND conjunctions of
              comparisons, such as those which use OR. Use
              explicit joins, outerjoins, or
              :meth:`~.RelationshipProperty.Comparator.has` in
              conjunction with :func:`~.expression.not_` for
              more comprehensive non-many-to-one scalar
              membership tests.
            * Comparisons against ``None`` given in a one-to-many
              or many-to-many context produce an EXISTS clause.
            """
            if isinstance(other, (NoneType, expression._Null)):
                if self.property.direction == MANYTOONE:
                    return sql.or_(*[x != None for x in
                                self.property._calculated_foreign_keys])
                else:
                    return self._criterion_exists()
            elif self.property.uselist:
                raise sa_exc.InvalidRequestError("Can't compare a collection"
                        " to an object or collection; use "
                        "contains() to test for membership.")
            else:
                return self.__negated_contains_or_equals(other)
        @util.memoized_property
        def property(self):
            # ensure all pending mappers are configured before the
            # property is used to generate expressions
            if mapperlib.module._new_mappers:
                configure_mappers()
            return self.prop
def compare(self, op, value,
value_is_parent=False,
alias_secondary=True):
if op == operators.eq:
if value is None:
if self.uselist:
return ~sql.exists([1], self.primaryjoin)
else:
return self._optimized_compare(None,
value_is_parent=value_is_parent,
alias_secondary=alias_secondary)
else:
return self._optimized_compare(value,
value_is_parent=value_is_parent,
alias_secondary=alias_secondary)
else:
return op(self.comparator, value)
def _optimized_compare(self, value, value_is_parent=False,
adapt_source=None,
alias_secondary=True):
if value is not None:
value = attributes.instance_state(value)
return self._get_strategy(strategies.LazyLoader).lazy_clause(value,
reverse_direction=not value_is_parent,
alias_secondary=alias_secondary,
adapt_source=adapt_source)
def __str__(self):
return str(self.parent.class_.__name__) + "." + self.key
    def merge(self,
                    session,
                    source_state,
                    source_dict,
                    dest_state,
                    dest_dict,
                    load, _recursive):
        # Merge the value of this relationship from source_state into
        # dest_state, as part of Session.merge().
        if load:
            # a bidirectional relationship may form a cycle; bail if the
            # reverse side of this pair was already merged.
            for r in self._reverse_property:
                if (source_state, r) in _recursive:
                    return
        if not "merge" in self.cascade:
            return
        if self.key not in source_dict:
            # attribute not loaded on the source; nothing to merge
            return
        if self.uselist:
            instances = source_state.get_impl(self.key).\
                            get(source_state, source_dict)
            if hasattr(instances, '_sa_adapter'):
                # an instrumented collection; iterate via its adapter
                instances = instances._sa_adapter
            if load:
                # for a full merge, pre-load the destination collection
                # so events fire correctly during _set_iterable below
                dest_state.get_impl(self.key).get(dest_state, dest_dict)
            dest_list = []
            for current in instances:
                current_state = attributes.instance_state(current)
                current_dict = attributes.instance_dict(current)
                _recursive[(current_state, self)] = True
                obj = session._merge(current_state, current_dict,
                                load=load, _recursive=_recursive)
                if obj is not None:
                    dest_list.append(obj)
            if not load:
                # load=False: populate the collection without firing
                # attribute events
                coll = attributes.init_state_collection(dest_state,
                        dest_dict, self.key)
                for c in dest_list:
                    coll.append_without_event(c)
            else:
                dest_state.get_impl(self.key)._set_iterable(dest_state,
                        dest_dict, dest_list)
        else:
            # scalar reference
            current = source_dict[self.key]
            if current is not None:
                current_state = attributes.instance_state(current)
                current_dict = attributes.instance_dict(current)
                _recursive[(current_state, self)] = True
                obj = session._merge(current_state, current_dict,
                                load=load, _recursive=_recursive)
            else:
                obj = None
            if not load:
                dest_dict[self.key] = obj
            else:
                dest_state.get_impl(self.key).set(dest_state,
                        dest_dict, obj, None)
    def cascade_iterator(self, type_, state, dict_, visited_states, halt_on=None):
        """Iterate (instance, mapper, state, dict) tuples for related
        objects reachable from ``state`` under the given cascade type."""
        # for deletes without passive_deletes, force-load the related
        # collection; otherwise only consider what's already present
        if type_ != 'delete' or self.passive_deletes:
            passive = attributes.PASSIVE_NO_INITIALIZE
        else:
            passive = attributes.PASSIVE_OFF
        if type_ == 'save-update':
            # include items pending addition/removal, not just current
            tuples = state.manager[self.key].impl.\
                        get_all_pending(state, dict_)
        else:
            tuples = state.value_as_iterable(dict_, self.key,
                            passive=passive)
        skip_pending = type_ == 'refresh-expire' and 'delete-orphan' \
            not in self.cascade
        for instance_state, c in tuples:
            if instance_state in visited_states:
                continue
            if c is None:
                # would like to emit a warning, but this would
                # break compatibility with existing behavior
                continue
            instance_dict = attributes.instance_dict(c)
            if halt_on and halt_on(instance_state):
                continue
            if skip_pending and not instance_state.key:
                # refresh-expire without delete-orphan: ignore objects
                # that were never persisted
                continue
            instance_mapper = instance_state.manager.mapper
            if not instance_mapper.isa(self.mapper.class_manager.mapper):
                raise AssertionError("Attribute '%s' on class '%s' "
                                    "doesn't handle objects "
                                    "of type '%s'" % (
                                        self.key,
                                        self.parent.class_,
                                        c.__class__
                                    ))
            visited_states.add(instance_state)
            yield c, instance_mapper, instance_state, instance_dict
    def _add_reverse_property(self, key):
        # Link this relationship with its reverse (backref/back_populates
        # partner) and sanity-check that the pair is consistent.
        other = self.mapper.get_property(key, _compile_mappers=False)
        self._reverse_property.add(other)
        other._reverse_property.add(self)
        if not other.mapper.common_parent(self.parent):
            raise sa_exc.ArgumentError('reverse_property %r on '
                    'relationship %s references relationship %s, which '
                    'does not reference mapper %s' % (key, self, other,
                    self.parent))
        # a valid pair must point in opposite directions (or be m2m)
        if self.direction in (ONETOMANY, MANYTOONE) and self.direction \
            == other.direction:
            raise sa_exc.ArgumentError('%s and back-reference %s are '
                    'both of the same direction %r. Did you mean to '
                    'set remote_side on the many-to-one side ?'
                    % (other, self, self.direction))
    @util.memoized_property
    def mapper(self):
        """Return the targeted :class:`.Mapper` for this
        :class:`.RelationshipProperty`.
        This is a lazy-initializing static attribute.
        """
        # NOTE: branch order matters here - a class is itself callable,
        # so the isinstance checks must come before the callable check.
        if isinstance(self.argument, type):
            mapper_ = mapper.class_mapper(self.argument,
                    compile=False)
        elif isinstance(self.argument, mapper.Mapper):
            mapper_ = self.argument
        elif util.callable(self.argument):
            # accept a callable to suit various deferred-
            # configurational schemes
            mapper_ = mapper.class_mapper(self.argument(),
                    compile=False)
        else:
            raise sa_exc.ArgumentError("relationship '%s' expects "
                    "a class or a mapper argument (received: %s)"
                    % (self.key, type(self.argument)))
        assert isinstance(mapper_, mapper.Mapper), mapper_
        return mapper_
    @util.memoized_property
    @util.deprecated("0.7", "Use .target")
    def table(self):
        """Return the selectable linked to this
        :class:`.RelationshipProperty` object's target
        :class:`.Mapper`."""
        # deprecated alias for .target (set in
        # _process_dependent_arguments)
        return self.target
    def do_init(self):
        """Perform configuration of this relationship at mapper
        configuration time.

        The steps below are order-dependent: join conditions must be
        resolved before synchronize pairs, which feed direction
        determination, which feeds local/remote pair calculation.
        """
        self._check_conflicts()
        self._process_dependent_arguments()
        self._determine_joins()
        self._determine_synchronize_pairs()
        self._determine_direction()
        self._determine_local_remote_pairs()
        self._post_init()
        self._generate_backref()
        super(RelationshipProperty, self).do_init()
    def _check_conflicts(self):
        """Test that this relationship is legal, warn about
        inheritance conflicts."""
        if not self.is_primary() \
            and not mapper.class_mapper(
                                self.parent.class_,
                                compile=False).has_property(self.key):
            # new relationships may only be placed on the primary mapper
            raise sa_exc.ArgumentError("Attempting to assign a new "
                    "relationship '%s' to a non-primary mapper on "
                    "class '%s'. New relationships can only be added "
                    "to the primary mapper, i.e. the very first mapper "
                    "created for class '%s' " % (self.key,
                        self.parent.class_.__name__,
                        self.parent.class_.__name__))
        # check for conflicting relationship of the same name on any
        # superclass mapper (not applicable to concrete inheritance)
        if not self.parent.concrete:
            for inheriting in self.parent.iterate_to_root():
                if inheriting is not self.parent \
                        and inheriting.has_property(self.key):
                    util.warn("Warning: relationship '%s' on mapper "
                              "'%s' supersedes the same relationship "
                              "on inherited mapper '%s'; this can "
                              "cause dependency issues during flush"
                              % (self.key, self.parent, inheriting))
    def _process_dependent_arguments(self):
        """Convert incoming configuration arguments to their
        proper form.
        Callables are resolved, ORM annotations removed.
        """
        # accept callables for deferred-configuration schemes
        for attr in (
            'order_by',
            'primaryjoin',
            'secondaryjoin',
            'secondary',
            '_user_defined_foreign_keys',
            'remote_side',
            ):
            attr_value = getattr(self, attr)
            if util.callable(attr_value):
                setattr(self, attr, attr_value())
        # strip ORM annotations from join conditions, coercing to
        # pure column expressions
        for attr in 'primaryjoin', 'secondaryjoin':
            val = getattr(self, attr)
            if val is not None:
                setattr(self, attr, _orm_deannotate(
                    expression._only_column_elements(val, attr))
                )
        # coerce order_by to a list of column elements
        if self.order_by is not False and self.order_by is not None:
            self.order_by = [
                    expression._only_column_elements(x, "order_by")
                    for x in
                    util.to_list(self.order_by)]
        # coerce foreign_keys and remote_side to column sets
        self._user_defined_foreign_keys = \
            util.column_set(
                    expression._only_column_elements(x, "foreign_keys")
                    for x in util.to_column_set(
                        self._user_defined_foreign_keys
                    ))
        self.remote_side = \
            util.column_set(
                    expression._only_column_elements(x, "remote_side")
                    for x in
                    util.to_column_set(self.remote_side))
        self.target = self.mapper.mapped_table
        # register delete-orphan bookkeeping with the target mapper
        if self.cascade.delete_orphan:
            self.mapper.primary_mapper().delete_orphans.append(
                            (self.key, self.parent.class_)
                        )
    def _determine_joins(self):
        """Determine the 'primaryjoin' and 'secondaryjoin' attributes,
        if not passed to the constructor already.
        This is based on analysis of the foreign key relationships
        between the parent and target mapped selectables.
        """
        if self.secondaryjoin is not None and self.secondary is None:
            raise sa_exc.ArgumentError("Property '" + self.key
                    + "' specified with secondary join condition but "
                    "no secondary argument")
        def _search_for_join(mapper, table):
            # find a join between the given mapper's mapped table and
            # the given table. will try the mapper's local table first
            # for more specificity, then if not found will try the more
            # general mapped table, which in the case of inheritance is
            # a join.
            return join_condition(mapper.mapped_table, table,
                                        a_subset=mapper.local_table)
        try:
            if self.secondary is not None:
                if self.secondaryjoin is None:
                    self.secondaryjoin = _search_for_join(self.mapper,
                                                        self.secondary)
                if self.primaryjoin is None:
                    self.primaryjoin = _search_for_join(self.parent,
                                                        self.secondary)
            else:
                if self.primaryjoin is None:
                    self.primaryjoin = _search_for_join(self.parent,
                                                        self.target)
        # NOTE: Python 2 except syntax; the original error ``e`` is
        # intentionally replaced by a more instructive message.
        except sa_exc.ArgumentError, e:
            raise sa_exc.ArgumentError("Could not determine join "
                    "condition between parent/child tables on "
                    "relationship %s. Specify a 'primaryjoin' "
                    "expression. If 'secondary' is present, "
                    "'secondaryjoin' is needed as well."
                    % self)
def _columns_are_mapped(self, *cols):
"""Return True if all columns in the given collection are
mapped by the tables referenced by this :class:`.Relationship`.
"""
for c in cols:
if self.secondary is not None \
and self.secondary.c.contains_column(c):
continue
if not self.parent.mapped_table.c.contains_column(c) and \
not self.target.c.contains_column(c):
return False
return True
    def _sync_pairs_from_join(self, join_condition, primary):
        """Determine a list of "source"/"destination" column pairs
        based on the given join condition, as well as the
        foreign keys argument.
        "source" would be a column referenced by a foreign key,
        and "destination" would be the column who has a foreign key
        reference to "source".

        ``primary`` is True when analyzing the primaryjoin, False for
        the secondaryjoin; it only affects error message wording.
        Raises ArgumentError when no usable pairs can be located.
        """
        fks = self._user_defined_foreign_keys
        # locate pairs of columns equated by the join condition,
        # treating the given foreign keys (if any) as the "destination"
        eq_pairs = criterion_as_pairs(join_condition,
                consider_as_foreign_keys=fks,
                any_operator=self.viewonly)
        # "secondary" - assume the "secondary" columns
        # are the fks
        if not eq_pairs and \
                self.secondary is not None and \
                not fks:
            fks = set(self.secondary.c)
            eq_pairs = criterion_as_pairs(join_condition,
                    consider_as_foreign_keys=fks,
                    any_operator=self.viewonly)
            if eq_pairs:
                util.warn("No ForeignKey objects were present "
                            "in secondary table '%s'. Assumed referenced "
                            "foreign key columns %s for join condition '%s' "
                            "on relationship %s" % (
                            self.secondary.description,
                            ", ".join(sorted(["'%s'" % col for col in fks])),
                            join_condition,
                            self
                        ))
        # Filter out just to columns that are mapped.
        # If viewonly, allow pairs where the FK col
        # was part of "foreign keys" - the column it references
        # may be in an un-mapped table - see
        # test.orm.test_relationships.ViewOnlyComplexJoin.test_basic
        # for an example of this.
        eq_pairs = [(l, r) for (l, r) in eq_pairs
                    if self._columns_are_mapped(l, r)
                    or self.viewonly and
                    r in fks]
        if eq_pairs:
            return eq_pairs
        # from here below is just determining the best error message
        # to report. Check for a join condition using any operator
        # (not just ==), perhaps they need to turn on "viewonly=True".
        if not self.viewonly and criterion_as_pairs(join_condition,
                consider_as_foreign_keys=self._user_defined_foreign_keys,
                any_operator=True):
            err = "Could not locate any "\
                "foreign-key-equated, locally mapped column "\
                "pairs for %s "\
                "condition '%s' on relationship %s." % (
                    primary and 'primaryjoin' or 'secondaryjoin',
                    join_condition,
                    self
                )
            if not self._user_defined_foreign_keys:
                err += " Ensure that the "\
                        "referencing Column objects have a "\
                        "ForeignKey present, or are otherwise part "\
                        "of a ForeignKeyConstraint on their parent "\
                        "Table, or specify the foreign_keys parameter "\
                        "to this relationship."
            err += " For more "\
                    "relaxed rules on join conditions, the "\
                    "relationship may be marked as viewonly=True."
            raise sa_exc.ArgumentError(err)
        else:
            if self._user_defined_foreign_keys:
                raise sa_exc.ArgumentError("Could not determine "
                        "relationship direction for %s condition "
                        "'%s', on relationship %s, using manual "
                        "'foreign_keys' setting. Do the columns "
                        "in 'foreign_keys' represent all, and "
                        "only, the 'foreign' columns in this join "
                        "condition? Does the %s Table already "
                        "have adequate ForeignKey and/or "
                        "ForeignKeyConstraint objects established "
                        "(in which case 'foreign_keys' is usually "
                        "unnecessary)?"
                        % (
                            primary and 'primaryjoin' or 'secondaryjoin',
                            join_condition,
                            self,
                            primary and 'mapped' or 'secondary'
                        ))
            else:
                raise sa_exc.ArgumentError("Could not determine "
                        "relationship direction for %s condition "
                        "'%s', on relationship %s. Ensure that the "
                        "referencing Column objects have a "
                        "ForeignKey present, or are otherwise part "
                        "of a ForeignKeyConstraint on their parent "
                        "Table, or specify the foreign_keys parameter "
                        "to this relationship."
                        % (
                            primary and 'primaryjoin' or 'secondaryjoin',
                            join_condition,
                            self
                        ))
def _determine_synchronize_pairs(self):
"""Resolve 'primary'/foreign' column pairs from the primaryjoin
and secondaryjoin arguments.
"""
if self.local_remote_pairs:
if not self._user_defined_foreign_keys:
raise sa_exc.ArgumentError(
"foreign_keys argument is "
"required with _local_remote_pairs argument")
self.synchronize_pairs = []
for l, r in self.local_remote_pairs:
if r in self._user_defined_foreign_keys:
self.synchronize_pairs.append((l, r))
elif l in self._user_defined_foreign_keys:
self.synchronize_pairs.append((r, l))
else:
self.synchronize_pairs = self._sync_pairs_from_join(
self.primaryjoin,
True)
self._calculated_foreign_keys = util.column_set(
r for (l, r) in
self.synchronize_pairs)
if self.secondaryjoin is not None:
self.secondary_synchronize_pairs = self._sync_pairs_from_join(
self.secondaryjoin,
False)
self._calculated_foreign_keys.update(
r for (l, r) in
self.secondary_synchronize_pairs)
else:
self.secondary_synchronize_pairs = None
    def _determine_direction(self):
        """Determine if this relationship is one to many, many to one,
        many to many.
        This is derived from the primaryjoin, presence of "secondary",
        and in the case of self-referential the "remote side".
        """
        if self.secondaryjoin is not None:
            self.direction = MANYTOMANY
        elif self._refers_to_parent_table():
            # self-referential: decide direction from the remote side.
            # ONETOMANY when no remote side is given, or when any
            # foreign key column that is not a "referenced" column is
            # part of the remote side.
            if self.local_remote_pairs:
                remote = [r for (l, r) in self.local_remote_pairs]
            elif self.remote_side:
                remote = self.remote_side
            else:
                remote = None
            if not remote or self._calculated_foreign_keys.difference(l for (l,
                    r) in self.synchronize_pairs).intersection(remote):
                self.direction = ONETOMANY
            else:
                self.direction = MANYTOONE
        else:
            # non-self-referential: direction follows which side owns
            # the foreign key columns
            parentcols = util.column_set(self.parent.mapped_table.c)
            targetcols = util.column_set(self.mapper.mapped_table.c)
            onetomany_fk = targetcols.intersection(
                            self._calculated_foreign_keys)
            manytoone_fk = parentcols.intersection(
                            self._calculated_foreign_keys)
            if onetomany_fk and manytoone_fk:
                # fks on both sides; disambiguate by the referenced
                # ("local") side of the sync pairs
                referents = [c for (c, f) in self.synchronize_pairs]
                onetomany_local = parentcols.intersection(referents)
                manytoone_local = targetcols.intersection(referents)
                if onetomany_local and not manytoone_local:
                    self.direction = ONETOMANY
                elif manytoone_local and not onetomany_local:
                    self.direction = MANYTOONE
                else:
                    raise sa_exc.ArgumentError(
                            "Can't determine relationship"
                            " direction for relationship '%s' - foreign "
                            "key columns are present in both the parent "
                            "and the child's mapped tables. Specify "
                            "'foreign_keys' argument." % self)
            elif onetomany_fk:
                self.direction = ONETOMANY
            elif manytoone_fk:
                self.direction = MANYTOONE
            else:
                raise sa_exc.ArgumentError("Can't determine relationship "
                        "direction for relationship '%s' - foreign "
                        "key columns are present in neither the parent "
                        "nor the child's mapped tables" % self)
        # sanity warnings against configurations that won't behave as
        # the user likely expects
        if self.cascade.delete_orphan and not self.single_parent \
            and (self.direction is MANYTOMANY or self.direction
                 is MANYTOONE):
            util.warn('On %s, delete-orphan cascade is not supported '
                      'on a many-to-many or many-to-one relationship '
                      'when single_parent is not set.   Set '
                      'single_parent=True on the relationship().'
                      % self)
        if self.direction is MANYTOONE and self.passive_deletes:
            util.warn("On %s, 'passive_deletes' is normally configured "
                      "on one-to-many, one-to-one, many-to-many "
                      "relationships only."
                      % self)
    def _determine_local_remote_pairs(self):
        """Determine pairs of columns representing "local" to
        "remote", where "local" columns are on the parent mapper,
        "remote" are on the target mapper.
        These pairs are used on the load side only to generate
        lazy loading clauses.
        """
        if not self.local_remote_pairs and not self.remote_side:
            # common case: derive local/remote pairs from the sync
            # pairs, flipping orientation for many-to-one
            eq_pairs = util.unique_list(
                            self.synchronize_pairs +
                            (self.secondary_synchronize_pairs or []))
            if self.direction is MANYTOONE:
                self.local_remote_pairs = [(r, l) for l, r in eq_pairs]
            else:
                self.local_remote_pairs = eq_pairs
        elif self.remote_side:
            if self.local_remote_pairs:
                raise sa_exc.ArgumentError('remote_side argument is '
                    'redundant against more detailed '
                    '_local_remote_side argument.')
            # derive the pairs by scanning the primaryjoin against the
            # user-declared remote side
            if self.direction is MANYTOONE:
                self.local_remote_pairs = [(r, l) for (l, r) in
                        criterion_as_pairs(self.primaryjoin,
                            consider_as_referenced_keys=self.remote_side,
                            any_operator=True)]
            else:
                self.local_remote_pairs = \
                    criterion_as_pairs(self.primaryjoin,
                            consider_as_foreign_keys=self.remote_side,
                            any_operator=True)
            if not self.local_remote_pairs:
                raise sa_exc.ArgumentError('Relationship %s could '
                        'not determine any local/remote column '
                        'pairs from remote side argument %r'
                        % (self, self.remote_side))
        # (else: explicit _local_remote_pairs were passed and are used
        # as-is)
        self.local_side = util.ordered_column_set(
                            l for l, r in self.local_remote_pairs)
        self.remote_side = util.ordered_column_set(
                            r for l, r in self.local_remote_pairs)
        # validate that the "near" side of the lazy-load comparison is
        # actually mapped
        if self.direction is ONETOMANY:
            for col in self.local_side:
                if not self._columns_are_mapped(col):
                    raise sa_exc.ArgumentError(
                            "Local column '%s' is not "
                            "part of mapping %s. Specify remote_side "
                            "argument to indicate which column lazy join "
                            "condition should compare against." % (col,
                            self.parent))
        elif self.direction is MANYTOONE:
            for col in self.remote_side:
                if not self._columns_are_mapped(col):
                    raise sa_exc.ArgumentError(
                            "Remote column '%s' is not "
                            "part of mapping %s. Specify remote_side "
                            "argument to indicate which column lazy join "
                            "condition should bind." % (col, self.mapper))
    def _generate_backref(self):
        """Interpret the 'backref' instruction to create a
        :func:`.relationship` complementary to this one, and register
        the two as a reverse-property pair."""
        if not self.is_primary():
            return
        if self.backref is not None and not self.back_populates:
            # backref may be a plain string key, or a backref() tuple
            # of (key, kwargs)
            if isinstance(self.backref, basestring):
                backref_key, kwargs = self.backref, {}
            else:
                backref_key, kwargs = self.backref
            mapper = self.mapper.primary_mapper()
            if mapper.has_property(backref_key):
                raise sa_exc.ArgumentError("Error creating backref "
                        "'%s' on relationship '%s': property of that "
                        "name exists on mapper '%s'" % (backref_key,
                        self, mapper))
            # the backref's join conditions are this relationship's,
            # with primary/secondary swapped for the m2m case
            if self.secondary is not None:
                pj = kwargs.pop('primaryjoin', self.secondaryjoin)
                sj = kwargs.pop('secondaryjoin', self.primaryjoin)
            else:
                pj = kwargs.pop('primaryjoin', self.primaryjoin)
                sj = kwargs.pop('secondaryjoin', None)
                if sj:
                    raise sa_exc.InvalidRequestError(
                        "Can't assign 'secondaryjoin' on a backref against "
                        "a non-secondary relationship."
                    )
            foreign_keys = kwargs.pop('foreign_keys',
                        self._user_defined_foreign_keys)
            parent = self.parent.primary_mapper()
            kwargs.setdefault('viewonly', self.viewonly)
            kwargs.setdefault('post_update', self.post_update)
            kwargs.setdefault('passive_updates', self.passive_updates)
            self.back_populates = backref_key
            relationship = RelationshipProperty(
                parent,
                self.secondary,
                pj,
                sj,
                foreign_keys=foreign_keys,
                back_populates=self.key,
                **kwargs
            )
            mapper._configure_property(backref_key, relationship)
        if self.back_populates:
            self._add_reverse_property(self.back_populates)
    def _post_init(self):
        """Log configuration results, finalize ``uselist``, and attach
        the flush-time dependency processor."""
        self.logger.info('%s setup primary join %s', self,
                         self.primaryjoin)
        self.logger.info('%s setup secondary join %s', self,
                         self.secondaryjoin)
        self.logger.info('%s synchronize pairs [%s]', self,
                         ','.join('(%s => %s)' % (l, r) for (l, r) in
                         self.synchronize_pairs))
        self.logger.info('%s secondary synchronize pairs [%s]', self,
                         ','.join('(%s => %s)' % (l, r) for (l, r) in
                         self.secondary_synchronize_pairs or []))
        self.logger.info('%s local/remote pairs [%s]', self,
                         ','.join('(%s / %s)' % (l, r) for (l, r) in
                         self.local_remote_pairs))
        self.logger.info('%s relationship direction %s', self,
                         self.direction)
        # default: collections for all but many-to-one
        if self.uselist is None:
            self.uselist = self.direction is not MANYTOONE
        # viewonly relationships take no part in flush
        if not self.viewonly:
            self._dependency_processor = \
                dependency.DependencyProcessor.from_relationship(self)
    @util.memoized_property
    def _use_get(self):
        """memoize the 'use_get' attribute of this RelationshipLoader's
        lazyloader."""
        # True when the lazy loader can satisfy loads via an identity
        # map get() (simple FK -> PK many-to-one)
        strategy = self._get_strategy(strategies.LazyLoader)
        return strategy.use_get
def _refers_to_parent_table(self):
pt = self.parent.mapped_table
mt = self.mapper.mapped_table
for c, f in self.synchronize_pairs:
if (
pt.is_derived_from(c.table) and \
pt.is_derived_from(f.table) and \
mt.is_derived_from(c.table) and \
mt.is_derived_from(f.table)
):
return True
else:
return False
    @util.memoized_property
    def _is_self_referential(self):
        """True if this relationship joins a mapper (hierarchy) to itself.

        Determined by whether the target mapper and the parent mapper share
        a common base mapper; memoized since mapper inheritance structure
        is fixed after configuration.
        """
        return self.mapper.common_parent(self.parent)
def per_property_preprocessors(self, uow):
if not self.viewonly and self._dependency_processor:
self._dependency_processor.per_property_preprocessors(uow)
    def _create_joins(self, source_polymorphic=False,
                            source_selectable=None, dest_polymorphic=False,
                            dest_selectable=None, of_type=None):
        """Produce the join elements used to render this relationship in SQL.

        Returns a 6-tuple ``(primaryjoin, secondaryjoin, source_selectable,
        dest_selectable, secondary, target_adapter)`` in which the join
        criteria have been adapted to aliased and/or polymorphic selectables
        as needed; ``target_adapter`` is a ClauseAdapter for further clause
        translation, or None when no aliasing occurred.
        """
        # resolve the "source" side: use the parent's polymorphic
        # selectable when requested and configured.
        if source_selectable is None:
            if source_polymorphic and self.parent.with_polymorphic:
                source_selectable = self.parent._with_polymorphic_selectable
        aliased = False
        if dest_selectable is None:
            if dest_polymorphic and self.mapper.with_polymorphic:
                dest_selectable = self.mapper._with_polymorphic_selectable
                aliased = True
            else:
                dest_selectable = self.mapper.mapped_table
            # a self-referential join with no explicit source must alias the
            # destination so the two sides of the join stay distinguishable.
            if self._is_self_referential and source_selectable is None:
                dest_selectable = dest_selectable.alias()
                aliased = True
        else:
            aliased = True
            # annotate a caller-supplied destination so that
            # its internal structure remains fixed
            # regardless of context.
            dest_selectable = _shallow_annotate(
                dest_selectable,
                {'no_replacement_traverse':True})
        aliased = aliased or (source_selectable is not None)
        primaryjoin, secondaryjoin, secondary = self.primaryjoin, \
            self.secondaryjoin, self.secondary
        # adjust the join condition for single table inheritance,
        # in the case that the join is to a subclass
        # this is analogous to the "_adjust_for_single_table_inheritance()"
        # method in Query.
        dest_mapper = of_type or self.mapper
        single_crit = dest_mapper._single_table_criterion
        if single_crit is not None:
            if secondaryjoin is not None:
                secondaryjoin = secondaryjoin & single_crit
            else:
                primaryjoin = primaryjoin & single_crit
        if aliased:
            if secondary is not None:
                # many-to-many: alias the association table and adapt both
                # join criteria toward the aliased selectables.
                secondary = secondary.alias()
                primary_aliasizer = ClauseAdapter(secondary)
                secondary_aliasizer = \
                    ClauseAdapter(dest_selectable,
                        equivalents=self.mapper._equivalent_columns).\
                        chain(primary_aliasizer)
                if source_selectable is not None:
                    primary_aliasizer = \
                        ClauseAdapter(secondary).\
                        chain(ClauseAdapter(source_selectable,
                            equivalents=self.parent._equivalent_columns))
                secondaryjoin = \
                    secondary_aliasizer.traverse(secondaryjoin)
            else:
                # one-to-many / many-to-one: adapt only the remote side of
                # the primaryjoin; local-side columns are excluded.
                primary_aliasizer = ClauseAdapter(dest_selectable,
                        exclude=self.local_side,
                        equivalents=self.mapper._equivalent_columns)
                if source_selectable is not None:
                    primary_aliasizer.chain(
                        ClauseAdapter(source_selectable,
                            exclude=self.remote_side,
                            equivalents=self.parent._equivalent_columns))
                secondary_aliasizer = None
            primaryjoin = primary_aliasizer.traverse(primaryjoin)
            target_adapter = secondary_aliasizer or primary_aliasizer
            # clear include/exclude restrictions so the adapter can be
            # reused for general clause translation by the caller.
            target_adapter.include = target_adapter.exclude = None
        else:
            target_adapter = None
        if source_selectable is None:
            source_selectable = self.parent.local_table
        if dest_selectable is None:
            dest_selectable = self.mapper.local_table
        return (
            primaryjoin,
            secondaryjoin,
            source_selectable,
            dest_selectable,
            secondary,
            target_adapter,
        )
# Legacy aliases: PropertyLoader / RelationProperty were earlier names for
# what is now RelationshipProperty; kept for backwards compatibility.
PropertyLoader = RelationProperty = RelationshipProperty
log.class_logger(RelationshipProperty)
| false | true |
f71ecf94681232dc19499ce57aba3e589002f9f4 | 20,443 | py | Python | test/functional/test_framework/test_framework.py | simulacrum-trade/simulacrum | 3e1962be7f9b58a1e6930658be3642de47830d74 | [
"MIT"
] | 1 | 2020-01-23T13:50:58.000Z | 2020-01-23T13:50:58.000Z | test/functional/test_framework/test_framework.py | simulacrum-trade/simulacrum | 3e1962be7f9b58a1e6930658be3642de47830d74 | [
"MIT"
] | 1 | 2020-04-01T13:14:36.000Z | 2020-04-09T06:35:37.000Z | test/functional/test_framework/test_framework.py | simulacrum-trade/simulacrum | 3e1962be7f9b58a1e6930658be3642de47830d74 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from enum import Enum
import logging
import optparse
import os
import pdb
import shutil
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
get_datadir_path,
initialize_datadir,
p2p_port,
set_node_times,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
    """Outcome of a test run, tracked by BitcoinTestFramework.main()."""
    PASSED = 1
    FAILED = 2
    SKIPPED = 3
# Process exit codes reported back to the test runner; 77 is the
# automake/autotools convention for "test skipped".
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
class BitcoinTestFramework():
    """Base class for a simulacrum test script.
    Individual simulacrum test scripts should subclass this class and override the set_test_params() and run_test() methods.
    Individual tests can also override the following methods to customize the test setup:
    - add_options()
    - setup_chain()
    - setup_network()
    - setup_nodes()
    The __init__() and main() methods should not be overridden.
    This class also contains various public and private helper methods."""
    def __init__(self):
        """Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
        self.setup_clean_chain = False
        self.nodes = []
        self.mocktime = 0
        self.supports_cli = False
        self.set_test_params()
        assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
    def main(self):
        """Main function. This should not be overridden by the subclass test scripts.

        Parses options, sets up logging/chain/network, runs the test,
        then shuts nodes down, cleans up and exits with an exit code
        reflecting the TestStatus outcome.
        """
        parser = optparse.OptionParser(usage="%prog [options]")
        parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
                          help="Leave simulacrumds and test.* datadir on exit or error")
        parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
                          help="Don't stop simulacrumds after the test execution")
        parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
                          help="Source directory containing simulacrumd/simulacrum-cli (default: %default)")
        parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
                          help="Directory for caching pregenerated datadirs")
        parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
        parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
                          help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
        parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
                          help="Print out all RPC calls as they are made")
        parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
                          help="The seed to use for assigning port numbers (default: current process id)")
        parser.add_option("--coveragedir", dest="coveragedir",
                          help="Write tested RPC commands into this directory")
        parser.add_option("--configfile", dest="configfile",
                          help="Location of the test framework config file")
        parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
                          help="Attach a python debugger if test fails")
        parser.add_option("--usecli", dest="usecli", default=False, action="store_true",
                          help="use bitcoin-cli instead of RPC for all commands")
        self.add_options(parser)
        (self.options, self.args) = parser.parse_args()
        PortSeed.n = self.options.port_seed
        # make in-tree binaries win over any system-installed daemon
        os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
        check_json_precision()
        self.options.cachedir = os.path.abspath(self.options.cachedir)
        # Set up temp directory and start logging
        if self.options.tmpdir:
            self.options.tmpdir = os.path.abspath(self.options.tmpdir)
            os.makedirs(self.options.tmpdir, exist_ok=False)
        else:
            self.options.tmpdir = tempfile.mkdtemp(prefix="test")
        self._start_logging()
        success = TestStatus.FAILED
        try:
            if self.options.usecli and not self.supports_cli:
                raise SkipTest("--usecli specified but test does not support using CLI")
            self.setup_chain()
            self.setup_network()
            # NOTE(review): fixed sleep after network setup -- presumably to
            # let peer connections settle; confirm whether an explicit wait
            # could replace it.
            time.sleep(5)
            self.run_test()
            success = TestStatus.PASSED
        except JSONRPCException as e:
            self.log.exception("JSONRPC error")
        except SkipTest as e:
            self.log.warning("Test Skipped: %s" % e.message)
            success = TestStatus.SKIPPED
        except AssertionError as e:
            self.log.exception("Assertion failed")
        except KeyError as e:
            self.log.exception("Key error")
        except Exception as e:
            self.log.exception("Unexpected exception caught during testing")
        except KeyboardInterrupt as e:
            self.log.warning("Exiting after keyboard interrupt")
        if success == TestStatus.FAILED and self.options.pdbonfailure:
            print("Testcase failed. Attaching python debugger. Enter ? for help")
            pdb.set_trace()
        if not self.options.noshutdown:
            self.log.info("Stopping nodes")
            if self.nodes:
                self.stop_nodes()
        else:
            for node in self.nodes:
                node.cleanup_on_exit = False
            self.log.info("Note: simulacrumds were not stopped and may still be running")
        if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
            self.log.info("Cleaning up")
            shutil.rmtree(self.options.tmpdir)
        else:
            self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
        if success == TestStatus.PASSED:
            self.log.info("Tests successful")
            exit_code = TEST_EXIT_PASSED
        elif success == TestStatus.SKIPPED:
            self.log.info("Test skipped")
            exit_code = TEST_EXIT_SKIPPED
        else:
            self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
            self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
            exit_code = TEST_EXIT_FAILED
        logging.shutdown()
        sys.exit(exit_code)
    # Methods to override in subclass test scripts.
    def set_test_params(self):
        """Tests must override this method to change default values for number of nodes, topology, etc"""
        raise NotImplementedError
    def add_options(self, parser):
        """Override this method to add command-line options to the test"""
        pass
    def setup_chain(self):
        """Override this method to customize blockchain setup"""
        self.log.info("Initializing test directory " + self.options.tmpdir)
        if self.setup_clean_chain:
            self._initialize_chain_clean()
        else:
            self._initialize_chain()
    def setup_network(self):
        """Override this method to customize test network topology"""
        self.setup_nodes()
        # Connect the nodes as a "chain". This allows us
        # to split the network between nodes 1 and 2 to get
        # two halves that can work on competing chains.
        for i in range(self.num_nodes - 1):
            connect_nodes_bi(self.nodes, i, i + 1)
        self.sync_all()
    def setup_nodes(self):
        """Override this method to customize test node setup"""
        extra_args = None
        if hasattr(self, "extra_args"):
            extra_args = self.extra_args
        self.add_nodes(self.num_nodes, extra_args)
        self.start_nodes()
    def run_test(self):
        """Tests must override this method to define test logic"""
        raise NotImplementedError
    # Public helper methods. These can be accessed by the subclass test scripts.
    def add_nodes(self, num_nodes, extra_args=None, rpchost=None, timewait=None, binary=None):
        """Instantiate TestNode objects"""
        if extra_args is None:
            extra_args = [[]] * num_nodes
        if binary is None:
            binary = [None] * num_nodes
        assert_equal(len(extra_args), num_nodes)
        assert_equal(len(binary), num_nodes)
        for i in range(num_nodes):
            self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, use_cli=self.options.usecli))
    def start_node(self, i, *args, **kwargs):
        """Start a simulacrumd"""
        node = self.nodes[i]
        node.start(*args, **kwargs)
        node.wait_for_rpc_connection()
        # NOTE(review): extra fixed wait after RPC is up -- reason not
        # evident from this file; confirm it is still required.
        time.sleep(10)
        if self.options.coveragedir is not None:
            coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
    def start_nodes(self, extra_args=None, *args, **kwargs):
        """Start multiple simulacrumds"""
        if extra_args is None:
            extra_args = [None] * self.num_nodes
        assert_equal(len(extra_args), self.num_nodes)
        try:
            for i, node in enumerate(self.nodes):
                node.start(extra_args[i], *args, **kwargs)
            for node in self.nodes:
                node.wait_for_rpc_connection()
        except:
            # If one node failed to start, stop the others
            self.stop_nodes()
            raise
        time.sleep(10)
        if self.options.coveragedir is not None:
            for node in self.nodes:
                coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
    def stop_node(self, i):
        """Stop a simulacrumd test node"""
        self.nodes[i].stop_node()
        self.nodes[i].wait_until_stopped()
    def stop_nodes(self):
        """Stop multiple simulacrumd test nodes"""
        for node in self.nodes:
            # Issue RPC to stop nodes
            node.stop_node()
        for node in self.nodes:
            # Wait for nodes to stop
            time.sleep(5)
            node.wait_until_stopped()
    def restart_node(self, i, extra_args=None):
        """Stop and start a test node"""
        self.stop_node(i)
        self.start_node(i, extra_args)
    def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None, *args, **kwargs):
        """Assert that starting node i fails during initialization.

        If expected_msg is given, it must also appear in the node's
        stderr output; otherwise any init failure is accepted.
        """
        with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
            try:
                self.start_node(i, extra_args, stderr=log_stderr, *args, **kwargs)
                self.stop_node(i)
            except Exception as e:
                assert 'simulacrumd exited' in str(e)  # node must have shutdown
                self.nodes[i].running = False
                self.nodes[i].process = None
                if expected_msg is not None:
                    log_stderr.seek(0)
                    stderr = log_stderr.read().decode('utf-8')
                    if expected_msg not in stderr:
                        raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
            else:
                # node started successfully -- that itself is the failure
                if expected_msg is None:
                    assert_msg = "simulacrumd should have exited with an error"
                else:
                    assert_msg = "simulacrumd should have exited with expected error " + expected_msg
                raise AssertionError(assert_msg)
    def wait_for_node_exit(self, i, timeout):
        """Block until node i's process terminates, raising after timeout seconds."""
        self.nodes[i].process.wait(timeout)
    def split_network(self):
        """
        Split the network of four nodes into nodes 0/1 and 2/3.
        """
        disconnect_nodes(self.nodes[1], 2)
        disconnect_nodes(self.nodes[2], 1)
        self.sync_all([self.nodes[:2], self.nodes[2:]])
    def join_network(self):
        """
        Join the (previously split) network halves together.
        """
        connect_nodes_bi(self.nodes, 1, 2)
        self.sync_all()
    def sync_all(self, node_groups=None):
        """Sync blocks and mempools within each group of nodes (default: all nodes as one group)."""
        if not node_groups:
            node_groups = [self.nodes]
        for group in node_groups:
            sync_blocks(group)
            sync_mempools(group)
    def enable_mocktime(self):
        """Enable mocktime for the script.
        mocktime may be needed for scripts that use the cached version of the
        blockchain. If the cached version of the blockchain is used without
        mocktime then the mempools will not sync due to IBD.
        For backward compatibility of the python scripts with previous
        versions of the cache, this helper function sets mocktime to Jan 1,
        2014 + (201 * 10 * 60)"""
        self.mocktime = 1454124732 + (201 * 10 * 60)
    def disable_mocktime(self):
        """Reset mocktime to 0 so nodes use real wall-clock time."""
        self.mocktime = 0
    # Private helper methods. These should not be accessed by the subclass test scripts.
    def _start_logging(self):
        """Configure framework logging: full DEBUG log to file, --loglevel to stdout."""
        # Add logger and logging handlers
        self.log = logging.getLogger('TestFramework')
        self.log.setLevel(logging.DEBUG)
        # Create file handler to log all messages
        fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
        fh.setLevel(logging.DEBUG)
        # Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
        ch = logging.StreamHandler(sys.stdout)
        # User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
        ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
        ch.setLevel(ll)
        # Format logs the same as simulacrumd's debug.log with microprecision (so log files can be concatenated and sorted)
        formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
        formatter.converter = time.gmtime
        fh.setFormatter(formatter)
        ch.setFormatter(formatter)
        # add the handlers to the logger
        self.log.addHandler(fh)
        self.log.addHandler(ch)
        if self.options.trace_rpc:
            rpc_logger = logging.getLogger("BitcoinRPC")
            rpc_logger.setLevel(logging.DEBUG)
            rpc_handler = logging.StreamHandler(sys.stdout)
            rpc_handler.setLevel(logging.DEBUG)
            rpc_logger.addHandler(rpc_handler)
    def _initialize_chain(self):
        """Initialize a pre-mined blockchain for use by the test.
        Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
        Afterward, create num_nodes copies from the cache."""
        assert self.num_nodes <= MAX_NODES
        create_cache = False
        for i in range(MAX_NODES):
            if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
                create_cache = True
                break
        if create_cache:
            self.log.debug("Creating data directories from cached datadir")
            # find and delete old cache directories if any exist
            for i in range(MAX_NODES):
                if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
                    shutil.rmtree(get_datadir_path(self.options.cachedir, i))
            # Create cache directories, run bitcoinds:
            for i in range(MAX_NODES):
                datadir = initialize_datadir(self.options.cachedir, i)
                args = [os.getenv("BITCOIND", "simulacrumd"), "-spendzeroconfchange=1", "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
                if i > 0:
                    args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
                self.nodes.append(TestNode(i, self.options.cachedir, extra_args=[], rpchost=None, timewait=None, binary=None, stderr=None, mocktime=self.mocktime, coverage_dir=None))
                self.nodes[i].args = args
                self.start_node(i)
            # Wait for RPC connections to be ready
            for node in self.nodes:
                node.wait_for_rpc_connection()
            # Create a 200-block-long chain; each of the 4 first nodes
            # gets 25 mature blocks and 25 immature.
            # Note: To preserve compatibility with older versions of
            # initialize_chain, only 4 nodes will generate coins.
            #
            # blocks are created with timestamps 10 minutes apart
            # starting from 2010 minutes in the past
            self.enable_mocktime()
            block_time = self.mocktime - (201 * 60)
            for i in range(2):
                for peer in range(4):
                    for j in range(25):
                        set_node_times(self.nodes, block_time)
                        self.nodes[peer].generate(1)
                        block_time += 60
                    # Must sync before next peer starts generating blocks
                    sync_blocks(self.nodes)
            # Shut them down, and clean up cache directories:
            self.stop_nodes()
            self.nodes = []
            self.disable_mocktime()
            def cache_path(n, *paths):
                return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths)
            # strip everything but the persistent chain/wallet state so the
            # cache stays small and reusable
            for i in range(MAX_NODES):
                for entry in os.listdir(cache_path(i)):
                    if entry not in ['wallet.dat', 'chainstate', 'blocks', 'sporks', 'zerocoin', 'backups']:
                        os.remove(cache_path(i, entry))
        for i in range(self.num_nodes):
            from_dir = get_datadir_path(self.options.cachedir, i)
            to_dir = get_datadir_path(self.options.tmpdir, i)
            shutil.copytree(from_dir, to_dir)
            initialize_datadir(self.options.tmpdir, i)  # Overwrite port/rpcport in bitcoin.conf
    def _initialize_chain_clean(self):
        """Initialize empty blockchain for use by the test.
        Create an empty blockchain and num_nodes wallets.
        Useful if a test case wants complete control over initialization."""
        for i in range(self.num_nodes):
            initialize_datadir(self.options.tmpdir, i)
class ComparisonTestFramework(BitcoinTestFramework):
    """Test framework for doing p2p comparison testing
    Sets up some simulacrumd binaries:
    - 1 binary: test binary
    - 2 binaries: 1 test binary, 1 ref binary
    - n>2 binaries: 1 test binary, n-1 ref binaries"""
    def set_test_params(self):
        """Default to two nodes on a clean chain; subclasses may override."""
        self.num_nodes = 2
        self.setup_clean_chain = True
    def add_options(self, parser):
        """Add --testbinary/--refbinary so two daemon builds can be compared."""
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("BITCOIND", "simulacrumd"),
                          help="simulacrumd binary to test")
        parser.add_option("--refbinary", dest="refbinary",
                          default=os.getenv("BITCOIND", "simulacrumd"),
                          help="simulacrumd binary to use for reference nodes (if any)")
    def setup_network(self):
        """Start node 0 with the test binary and all others with the reference binary."""
        extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
        if hasattr(self, "extra_args"):
            extra_args = self.extra_args
        self.add_nodes(self.num_nodes, extra_args,
                       binary=[self.options.testbinary] +
                       [self.options.refbinary] * (self.num_nodes - 1))
        self.start_nodes()
class SkipTest(Exception):
    """This exception is raised to skip a test"""
    def __init__(self, message):
        # The reason string is stored (not passed to Exception.__init__)
        # and read back as e.message when the skip is logged.
        self.message = message
| 41.977413 | 310 | 0.621827 |
from enum import Enum
import logging
import optparse
import os
import pdb
import shutil
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
get_datadir_path,
initialize_datadir,
p2p_port,
set_node_times,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
class BitcoinTestFramework():
def __init__(self):
self.setup_clean_chain = False
self.nodes = []
self.mocktime = 0
self.supports_cli = False
self.set_test_params()
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
def main(self):
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave simulacrumds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop simulacrumds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing simulacrumd/simulacrum-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_option("--configfile", dest="configfile",
help="Location of the test framework config file")
parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_option("--usecli", dest="usecli", default=False, action="store_true",
help="use bitcoin-cli instead of RPC for all commands")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix="test")
self._start_logging()
success = TestStatus.FAILED
try:
if self.options.usecli and not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.setup_chain()
self.setup_network()
time.sleep(5)
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: simulacrumds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up")
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
if success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
exit_code = TEST_EXIT_FAILED
logging.shutdown()
sys.exit(exit_code)
# Methods to override in subclass test scripts.
def set_test_params(self):
raise NotImplementedError
def add_options(self, parser):
pass
def setup_chain(self):
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
def setup_nodes(self):
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
def run_test(self):
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, rpchost=None, timewait=None, binary=None):
if extra_args is None:
extra_args = [[]] * num_nodes
if binary is None:
binary = [None] * num_nodes
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, use_cli=self.options.usecli))
def start_node(self, i, *args, **kwargs):
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
time.sleep(10)
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
time.sleep(10)
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i):
self.nodes[i].stop_node()
self.nodes[i].wait_until_stopped()
def stop_nodes(self):
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node()
for node in self.nodes:
# Wait for nodes to stop
time.sleep(5)
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
self.stop_node(i)
self.start_node(i, extra_args)
def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None, *args, **kwargs):
with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
try:
self.start_node(i, extra_args, stderr=log_stderr, *args, **kwargs)
self.stop_node(i)
except Exception as e:
assert 'simulacrumd exited' in str(e) # node must have shutdown
self.nodes[i].running = False
self.nodes[i].process = None
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8')
if expected_msg not in stderr:
raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
else:
if expected_msg is None:
assert_msg = "simulacrumd should have exited with an error"
else:
assert_msg = "simulacrumd should have exited with expected error " + expected_msg
raise AssertionError(assert_msg)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
def enable_mocktime(self):
self.mocktime = 1454124732 + (201 * 10 * 60)
def disable_mocktime(self):
self.mocktime = 0
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as simulacrumd's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
    def _initialize_chain(self):
        """Initialize a pre-mined blockchain for use by the test.

        If any of the MAX_NODES cached datadirs is missing, rebuild the whole
        cache: start one daemon per cache slot, mine a 200-block chain
        (2 rounds x 4 mining nodes x 25 blocks) with mocked timestamps one
        minute apart, then strip the cache dirs down to reusable state.
        Finally, copy the first num_nodes cached datadirs into the test's
        tmpdir.
        """
        assert self.num_nodes <= MAX_NODES
        # Rebuild the cache only if any per-node cache dir is missing.
        create_cache = False
        for i in range(MAX_NODES):
            if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
                create_cache = True
                break
        if create_cache:
            self.log.debug("Creating data directories from cached datadir")
            # Drop any partial/stale cache dirs so the rebuild starts clean.
            for i in range(MAX_NODES):
                if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
                    shutil.rmtree(get_datadir_path(self.options.cachedir, i))
            # Create cache directories and start one daemon per slot.
            for i in range(MAX_NODES):
                datadir = initialize_datadir(self.options.cachedir, i)
                args = [os.getenv("BITCOIND", "simulacrumd"), "-spendzeroconfchange=1", "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
                if i > 0:
                    # Every node past the first connects to node 0 so blocks propagate.
                    args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
                self.nodes.append(TestNode(i, self.options.cachedir, extra_args=[], rpchost=None, timewait=None, binary=None, stderr=None, mocktime=self.mocktime, coverage_dir=None))
                self.nodes[i].args = args
                self.start_node(i)
            # Block until every node's RPC interface is reachable.
            for node in self.nodes:
                node.wait_for_rpc_connection()
            # Mine 200 blocks with mocked times spaced 60s apart, ending just
            # before self.mocktime (hence the 201-minute offset).
            self.enable_mocktime()
            block_time = self.mocktime - (201 * 60)
            for i in range(2):
                for peer in range(4):
                    for j in range(25):
                        set_node_times(self.nodes, block_time)
                        self.nodes[peer].generate(1)
                        block_time += 60
                    # Sync before the next peer mines so no node builds on a stale tip.
                    sync_blocks(self.nodes)
            # Shut the daemons down before snapshotting the datadirs.
            self.stop_nodes()
            self.nodes = []
            self.disable_mocktime()
            def cache_path(n, *paths):
                # Helper: a path inside node n's cached regtest datadir.
                return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths)
            # Keep only reusable chain/wallet state; delete logs, pid files,
            # etc. so the cache stays deterministic between runs.
            for i in range(MAX_NODES):
                for entry in os.listdir(cache_path(i)):
                    if entry not in ['wallet.dat', 'chainstate', 'blocks', 'sporks', 'zerocoin', 'backups']:
                        os.remove(cache_path(i, entry))
        # Copy cached datadirs for the nodes this test actually uses, then
        # re-run initialize_datadir (presumably to rewrite per-node config
        # for the tmpdir location — confirm against its definition).
        for i in range(self.num_nodes):
            from_dir = get_datadir_path(self.options.cachedir, i)
            to_dir = get_datadir_path(self.options.tmpdir, i)
            shutil.copytree(from_dir, to_dir)
            initialize_datadir(self.options.tmpdir, i)
def _initialize_chain_clean(self):
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
class ComparisonTestFramework(BitcoinTestFramework):
    """Framework for comparing a binary under test against reference nodes.

    Node 0 runs the binary given by --testbinary; all remaining nodes run
    the --refbinary one. Both default to the BITCOIND environment variable
    or 'simulacrumd'.
    """
    def set_test_params(self):
        # Two nodes (one test, one reference) starting from an empty chain.
        self.num_nodes = 2
        self.setup_clean_chain = True
    def add_options(self, parser):
        """Add the test/reference binary options to the optparse parser."""
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("BITCOIND", "simulacrumd"),
                          help="simulacrumd binary to test")
        parser.add_option("--refbinary", dest="refbinary",
                          default=os.getenv("BITCOIND", "simulacrumd"),
                          help="simulacrumd binary to use for reference nodes (if any)")
    def setup_network(self):
        """Start num_nodes nodes: the test binary first, then reference ones."""
        # Whitelist localhost by default; a subclass may pre-set extra_args
        # to override this.
        extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
        if hasattr(self, "extra_args"):
            extra_args = self.extra_args
        self.add_nodes(self.num_nodes, extra_args,
                       binary=[self.options.testbinary] +
                       [self.options.refbinary] * (self.num_nodes - 1))
        self.start_nodes()
class SkipTest(Exception):
    """Raised to indicate the whole test should be skipped rather than run.

    The test runner catches this and reports the test as skipped instead of
    failed.
    """
    def __init__(self, message):
        # Fix: forward the message to Exception so str(exc) and tracebacks
        # show it (the original left str(exc) empty). Keep the .message
        # attribute for existing callers.
        super().__init__(message)
        self.message = message
| true | true |
f71ecfff9c183a5b99999bb2998f06e10f2e2b9c | 1,030 | py | Python | var/spack/repos/builtin/packages/py-msal/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-10T13:47:48.000Z | 2019-04-17T13:05:17.000Z | var/spack/repos/builtin/packages/py-msal/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 32 | 2020-12-15T17:29:20.000Z | 2022-03-21T15:08:31.000Z | var/spack/repos/builtin/packages/py-msal/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2021-04-07T18:27:09.000Z | 2022-03-31T22:52:38.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class PyMsal(PythonPackage):
    """The Microsoft Authentication Library (MSAL) for Python library enables
    your app to access the Microsoft Cloud by supporting authentication of
    users with Microsoft Azure Active Directory accounts (AAD) and Microsoft
    Accounts (MSA) using industry standard OAuth2 and OpenID Connect."""
    homepage = "https://github.com/AzureAD/microsoft-authentication-library-for-python"
    pypi = "msal/msal-1.3.0.tar.gz"
    version('1.3.0', sha256='5442a3a9d006506e653d3c4daff40538bdf067bf07b6b73b32d1b231d5e77a92')
    version('1.0.0', sha256='ecbe3f5ac77facad16abf08eb9d8562af3bc7184be5d4d90c9ef4db5bde26340')
    # Build-time only dependency.
    depends_on('py-setuptools', type='build')
    # Runtime dependencies, capped below the next major version.
    depends_on('py-requests@2.0.0:2.999', type=('build', 'run'))
    depends_on('py-pyjwt@1.0.0:1.999+crypto', type=('build', 'run'))
| 46.818182 | 95 | 0.750485 |
class PyMsal(PythonPackage):
    """Spack recipe for MSAL, the Microsoft Authentication Library for
    Python (OAuth2 / OpenID Connect authentication against Azure AD and
    Microsoft accounts)."""
    homepage = "https://github.com/AzureAD/microsoft-authentication-library-for-python"
    pypi = "msal/msal-1.3.0.tar.gz"
    version('1.3.0', sha256='5442a3a9d006506e653d3c4daff40538bdf067bf07b6b73b32d1b231d5e77a92')
    version('1.0.0', sha256='ecbe3f5ac77facad16abf08eb9d8562af3bc7184be5d4d90c9ef4db5bde26340')
    # Build-time only dependency.
    depends_on('py-setuptools', type='build')
    # Runtime dependencies, capped below the next major version.
    depends_on('py-requests@2.0.0:2.999', type=('build', 'run'))
    depends_on('py-pyjwt@1.0.0:1.999+crypto', type=('build', 'run'))
| true | true |
f71ed0eb715dfac31c4a74be37828bd583bcf88b | 15,233 | py | Python | train_nuclei.py | xumm94/2018_data_science_bowl | 9f7a6b60b7c1e933c30acd8abbdeeb7bd869a3f6 | [
"MIT"
] | null | null | null | train_nuclei.py | xumm94/2018_data_science_bowl | 9f7a6b60b7c1e933c30acd8abbdeeb7bd869a3f6 | [
"MIT"
] | null | null | null | train_nuclei.py | xumm94/2018_data_science_bowl | 9f7a6b60b7c1e933c30acd8abbdeeb7bd869a3f6 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Mask R-CNN - Train on Nuclei Dataset (updated from train_shapes.ipynb)

This script trains Mask R-CNN on the nuclei dataset (see NucleiConfig and
NucleiDataset below). It was adapted from the toy "shapes" training
notebook; unlike that small synthetic example, training here still needs
a GPU, because the network backbone is a ResNet-101, which would be too
slow to train on a CPU.
"""
import os
import sys
import random
import math
import re
import time
from tqdm import tqdm
import numpy as np
import pandas as pd
import cv2
import matplotlib
import matplotlib.pyplot as plt
from config import Config
import utils
import model as modellib
import visualize
from model import log
import logging
import argparse
"""
Configurations
Override form Config
"""
class NucleiConfig(Config):
    """Configuration for training Mask R-CNN on the nuclei dataset.

    Derives from the base Config class and overrides values specific
    to the nuclei images.
    """
    # Give the configuration a recognizable name.
    NAME = "nuclei"
    # Train on 2 GPUs with 8 images each -> effective batch size of 16.
    GPU_COUNT = 2
    IMAGES_PER_GPU = 8
    # Number of classes (including background).
    NUM_CLASSES = 1 + 1  # background + nucleus
    # Square 512x512 inputs keep training fast.
    IMAGE_MIN_DIM = 512
    IMAGE_MAX_DIM = 512
    # ROIs per image fed to the classifier/mask heads. Fix: the original
    # assigned this twice (300, then 32); only the final value of 32 ever
    # took effect, so the shadowed assignment is removed here.
    TRAIN_ROIS_PER_IMAGE = 32
    # Use smaller anchors because nuclei are small objects.
    RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)  # anchor side in pixels
    # Short epochs and few validation steps for quick feedback.
    STEPS_PER_EPOCH = 50
    VALIDATION_STEPS = 5
    LEARNING_RATE = 0.001
    # Maximum number of ground truth instances to use in one image.
    MAX_GT_INSTANCES = 200
    def display(self, logger=None):
        """Print all configuration values, optionally mirroring to a logger.

        Args:
            logger: optional logging.Logger. Fix: made optional so that
                subclasses (e.g. the inference configs built in the CLI)
                can call display() with no argument; the original signature
                required a logger and raised TypeError on display().
        """
        print("\nConfigurations:")
        if logger is not None:
            logger.info('\nConfigurations:')
        for a in dir(self):
            if not a.startswith("__") and not callable(getattr(self, a)):
                print("{:30} {}".format(a, getattr(self, a)))
                if logger is not None:
                    logger.info("{:30} {}".format(a, getattr(self, a)))
        print("\n")
class NucleiDataset(utils.Dataset):
    """Dataset wrapper that loads nuclei images and their instance masks.

    Expects the Kaggle 2018 Data Science Bowl layout:
    <data_path>/<image_id>/images/<image_id>.png and
    <data_path>/<image_id>/masks/*.png (one binary mask file per nucleus).
    """
    def load_image_info(self, data_path, img_set = None):
        """Register every image found under data_path.

        Args:
            data_path: root directory containing one folder per image id.
            img_set: optional text file listing one image id per line; when
                None, every subdirectory of data_path is used.
        """
        # Single foreground class for now.
        self.add_class("nucleis", 1, "regular")
        # TO DO : Three different image types into three classes
        if img_set is None:
            train_ids = next(os.walk(data_path))[1]
        else:
            with open(img_set) as f:
                read_data = f.readlines()
            train_ids = [read_data[i][:-1] for i in range(0,len(read_data))] # Delete New line '\n'
        # Record path and size info for each image.
        for i, id_ in enumerate(train_ids):
            file_path = os.path.join(data_path, id_)
            img_path = os.path.join(file_path, "images")
            masks_path = os.path.join(file_path, "masks")
            img_name = id_ + ".png"
            img = cv2.imread(os.path.join(img_path, img_name))
            # NOTE(review): cv2 arrays are (rows, cols, channels), so "width"
            # here actually holds the row count and "height" the column
            # count. load_mask uses the same swapped convention, so the
            # arrays still line up — confirm before renaming either side.
            width, height, _ = img.shape
            self.add_image("nucleis", image_id=id_, path=file_path,
                           img_path=img_path, masks_path=masks_path,
                           width=width, height=height,
                           nucleis="nucleis")
    def load_image(self, image_id):
        """Read and return the image (as loaded by cv2) for the given id."""
        info = self.image_info[image_id]
        img_path = info["img_path"]
        img_name = info["id"] + ".png"
        image = cv2.imread(os.path.join(img_path, img_name))
        return image
    def image_reference(self, image_id):
        """Return the on-disk path of the given image id."""
        info = self.image_info[image_id]
        if info["source"] == "nucleis":
            return info["path"]
        else:
            super(self.__class__).image_reference(self, image_id)
    def load_mask(self, image_id):
        """Load the instance masks of the given image ID.

        Returns:
            masks: uint8 array with one channel per mask file found.
            class_ids: int32 array of ones (single foreground class).
        """
        info = self.image_info[image_id]
        mask_files = next(os.walk(info["masks_path"]))[2]
        masks = np. zeros([info['width'], info['height'], len(mask_files)], dtype=np.uint8)
        for i, id_ in enumerate(mask_files):
            # Read each mask as grayscale and stack it as its own channel.
            single_mask = cv2.imread(os.path.join(info["masks_path"], id_), 0)
            masks[:, :, i:i+1] = single_mask[:, :, np.newaxis]
        class_ids = np.ones(len(mask_files))
        return masks, class_ids.astype(np.int32)
    # def test(self):
        # return "1"
def rle_encoding(x):
    """Run-length encode a binary mask.

    Pixels are numbered from 1 in column-major (Fortran) order, matching
    the Kaggle submission format. Returns a flat list alternating
    run start positions and run lengths.
    """
    flat = x.T.flatten()
    runs = []
    previous = -2
    for pixel in np.where(flat == 1)[0]:
        if pixel > previous + 1:
            # Start a new run: 1-based start index with length 0 for now.
            runs.append(pixel + 1)
            runs.append(0)
        runs[-1] += 1
        previous = pixel
    return runs
def parser_argument():
    """Parse the command line options for this script.

    Returns:
        argparse.Namespace holding the positional command plus all
        optional settings.
    """
    parser = argparse.ArgumentParser(
        description='Train Mask R-CNN on Nuclei Dataset.')
    # The positional command selects the run mode.
    parser.add_argument("command",
                        metavar="<command>",
                        help="'train' or 'evaluate' or predict")
    # (flag, keyword arguments) pairs describing every optional argument.
    optional_arguments = [
        ("--datapath",
         dict(metavar="/path/to/data/", default="./data",
              help='Directory of the Nuclei dataset')),
        ("--init_with",
         dict(metavar="/init/type", default="coco",
              help="Initialize with the (\"coco\"/\"imagenet\"/\"last\") net")),
        ("--model",
         dict(metavar="/path/to/weights.h5", default="./models/mask_rcnn_coco.h5",
              help="Path to weights .h5 file")),
        ("--ckpt",
         dict(metavar="/path/to/save/checkpoint", default="/data/lf/Nuclei/logs",
              help="Directory of the checkpoint")),
        ("--epochs",
         dict(metavar="/num/of/epochs", default="50",
              help="The number of the training epochs")),
        ("--finetune",
         dict(metavar="/finetune/type", default="heads",
              help="The type of the finetune method(\"heads\" or \"all\")")),
        ("--lr_start",
         dict(metavar="/value/of/start/lr", default="0.001", type=float,
              help="The Value of learning rate to start")),
        ("--train_dataset",
         dict(metavar="train/imgs/names", default="10-fold-train-1.txt",
              help="The training set split of the data")),
        ("--val_dataset",
         dict(metavar="val/imgs/names", default="10-fold-val-1.txt",
              help="The validation set split of the data")),
    ]
    for flag, kwargs in optional_arguments:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
if __name__ == '__main__':
    args = parser_argument()
    # One timestamped log file per run so runs don't clobber each other.
    logname = "config-" + time.strftime('%Y%m%d%H%M', time.localtime(time.time())) +".log"
    logging.basicConfig(filename=os.path.join(args.ckpt, logname), level=logging.INFO)
    logger = logging.getLogger('root')
    logger.info('\nBasic Setting:')
    logger.info('\nCommand: {} \n Initialize: {} \n Model: {} \n Datapath: {} \n Ckpt: {} \n Epochs \
        : {} \n Finetune: {} \n Train_dataset: {} \n Val_dataset: {} \n' \
        .format(args.command, args.init_with, args.model, args.datapath, args.ckpt,\
         args.epochs, args.finetune, args.train_dataset, args.val_dataset))
    # Train or evaluate or predict
    if args.command == "train":
        config = NucleiConfig()
        config.LEARNING_RATE = args.lr_start
        config.display(logger)
        model = modellib.MaskRCNN(mode="training", config=config,
                                  model_dir=args.ckpt)
        # Select weights file to load.
        print("Loading weights From ", args.model)
        if args.init_with == "imagenet":
            model.load_weights(model.get_imagenet_weights(), by_name=True)
        elif args.init_with == "coco":
            # Load weights trained on MS COCO, but skip the layers that
            # differ due to the different number of classes.
            model.load_weights(args.model, by_name=True,
                               exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
                                        "mrcnn_bbox", "mrcnn_mask"])
        elif args.init_with == "last":
            # Continue training from a previously saved checkpoint.
            model.load_weights(args.model, by_name=True)
        # Build the train/validation splits from the fold files.
        DATASET_DIR = os.path.join(args.datapath, "stage1_train_fixed")
        TRAIN_IMG_SET = os.path.join(args.datapath, "stage1_train_fixed_10fold", args.train_dataset)
        VAL_IMG_SET = os.path.join(args.datapath, "stage1_train_fixed_10fold", args.val_dataset)
        dataset_train = NucleiDataset()
        dataset_train.load_image_info(DATASET_DIR, TRAIN_IMG_SET)
        dataset_train.prepare()
        dataset_val = NucleiDataset()
        dataset_val.load_image_info(DATASET_DIR, VAL_IMG_SET)
        dataset_val.prepare()
        print("Loading {} training images, {} validation images"
              .format(len(dataset_train.image_ids), len(dataset_val.image_ids)))
        if args.finetune == "heads":
            model.train(dataset_train, dataset_val,
                        learning_rate=config.LEARNING_RATE,
                        epochs=int(args.epochs),
                        layers='heads',
                        logger=logger)
        elif args.finetune == "all":
            model.train(dataset_train, dataset_val,
                        learning_rate=config.LEARNING_RATE,
                        epochs=int(args.epochs),
                        layers='all',
                        logger=logger)
        else:
            raise NameError("Only two finetune type is vaild(\"heads\" or \"all\")")
    elif args.command == "evaluate":
        # TODO AP in [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
        class InferenceConfig(NucleiConfig):
            # Run inference one image at a time.
            # Batch size = GPU_COUNT * IMAGES_PER_GPU = 1.
            GPU_COUNT = 1
            IMAGES_PER_GPU = 1
            DETECTION_MAX_INSTANCES = 300
        config = InferenceConfig()
        config.display(logger)
        model = modellib.MaskRCNN(mode="inference", config=config,
                                  model_dir=args.ckpt)
        print("Loading weights From ", args.model)
        model.load_weights(args.model, by_name=True)
        # Fix: the parser defines --datapath (args.datapath); the original
        # referenced the non-existent args.dataset and crashed here.
        VALSET_DIR = os.path.join(args.datapath, "stage1_val")
        dataset_val = NucleiDataset()
        dataset_val.load_image_info(VALSET_DIR)
        dataset_val.prepare()
        print("Evaluate {} images".format(len(dataset_val.image_ids)))
        APs = []
        for image_id in tqdm(dataset_val.image_ids):
            # Load image and ground truth data. Fix: pass the config
            # *instance* — the original passed the InferenceConfig class,
            # which lacks the attributes computed in Config.__init__.
            image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(
                dataset_val, config, image_id, use_mini_mask=False)
            # Run object detection.
            results = model.detect([image], verbose=0)
            r = results[0]
            # Compute AP at IoU 0.5.
            AP, precisions, recalls, overlaps = utils.compute_ap(gt_bbox,
                gt_class_id, r["rois"], r["class_ids"], r["scores"], iou_threshold=0.5)
            APs.append(AP)
        print("mAP: ", np.mean(APs))
    elif args.command == "predict":
        class InferenceConfig(NucleiConfig):
            # Run inference one image at a time.
            # Batch size = GPU_COUNT * IMAGES_PER_GPU = 1.
            GPU_COUNT = 1
            IMAGES_PER_GPU = 1
            DETECTION_NMS_THRESHOLD = 0.3
            DETECTION_MAX_INSTANCES = 300
        config = InferenceConfig()
        config.display(logger)
        model = modellib.MaskRCNN(mode="inference", config=config,
                                  model_dir=args.ckpt)
        print("Loading weights From ", args.model)
        model.load_weights(args.model, by_name=True)
        # Fix: args.datapath, not the non-existent args.dataset.
        TESTSET_DIR = os.path.join(args.datapath, "stage1_test")
        dataset_test = NucleiDataset()
        dataset_test.load_image_info(TESTSET_DIR)
        dataset_test.prepare()
        print("Predict {} images".format(dataset_test.num_images))
        test_ids = []
        test_rles = []
        for image_id in tqdm(dataset_test.image_ids):
            image = dataset_test.load_image(image_id)
            id_ = dataset_test.image_info[image_id]["id"]
            results = model.detect([image], verbose=0)
            r = results[0]
            # Resolve overlapping instance masks: a pixel belongs to the
            # first instance that claimed it; later instances lose it.
            mask_exist = np.zeros(r['masks'].shape[:-1], dtype=np.uint8)
            for i in range(r['masks'].shape[-1]):
                _mask = r['masks'][:,:,i]
                overlap_index = np.where(np.multiply(mask_exist, _mask) == 1)
                _mask[overlap_index] = 0
                mask_exist += _mask
                # Only emit instances that still cover at least one pixel.
                if np.any(_mask):
                    test_ids.append(id_)
                    test_rles.append(rle_encoding(_mask))
                else :
                    continue
        sub = pd.DataFrame()
        sub['ImageId'] = test_ids
        sub['EncodedPixels'] = pd.Series(test_rles).apply(lambda x: ' '.join(str(y) for y in x))
        csvpath = "{}.csv".format(args.model)
        print("Writing the Result in {}".format(csvpath))
        sub.to_csv(csvpath, index=False)
    else:
        print("'{}' is not recognized. Use 'train' 'evaluate' 'predict'".format(args.command))
| 38.760814 | 101 | 0.586359 |
import os
import sys
import random
import math
import re
import time
from tqdm import tqdm
import numpy as np
import pandas as pd
import cv2
import matplotlib
import matplotlib.pyplot as plt
from config import Config
import utils
import model as modellib
import visualize
from model import log
import logging
import argparse
class NucleiConfig(Config):
    """Configuration for training Mask R-CNN on the nuclei dataset.

    Derives from the base Config class and overrides values specific
    to the nuclei images.
    """
    # Give the configuration a recognizable name.
    NAME = "nuclei"
    # Train on 2 GPUs with 8 images each -> effective batch size of 16.
    GPU_COUNT = 2
    IMAGES_PER_GPU = 8
    # Number of classes (including background).
    NUM_CLASSES = 1 + 1  # background + nucleus
    # Square 512x512 inputs keep training fast.
    IMAGE_MIN_DIM = 512
    IMAGE_MAX_DIM = 512
    # ROIs per image fed to the classifier/mask heads. Fix: the original
    # assigned this twice (300, then 32); only the final value of 32 ever
    # took effect, so the shadowed assignment is removed here.
    TRAIN_ROIS_PER_IMAGE = 32
    # Use smaller anchors because nuclei are small objects.
    RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)  # anchor side in pixels
    # Short epochs and few validation steps for quick feedback.
    STEPS_PER_EPOCH = 50
    VALIDATION_STEPS = 5
    LEARNING_RATE = 0.001
    # Maximum number of ground truth instances to use in one image.
    MAX_GT_INSTANCES = 200
    def display(self, logger=None):
        """Print all configuration values, optionally mirroring to a logger.

        Args:
            logger: optional logging.Logger. Fix: made optional so that
                subclasses (e.g. the inference configs built in the CLI)
                can call display() with no argument; the original signature
                required a logger and raised TypeError on display().
        """
        print("\nConfigurations:")
        if logger is not None:
            logger.info('\nConfigurations:')
        for a in dir(self):
            if not a.startswith("__") and not callable(getattr(self, a)):
                print("{:30} {}".format(a, getattr(self, a)))
                if logger is not None:
                    logger.info("{:30} {}".format(a, getattr(self, a)))
        print("\n")
class NucleiDataset(utils.Dataset):
    """Dataset wrapper that loads nuclei images and their instance masks.

    Expects the Kaggle 2018 Data Science Bowl layout:
    <data_path>/<image_id>/images/<image_id>.png and
    <data_path>/<image_id>/masks/*.png (one binary mask file per nucleus).
    """
    def load_image_info(self, data_path, img_set = None):
        """Register every image found under data_path.

        Args:
            data_path: root directory containing one folder per image id.
            img_set: optional text file listing one image id per line; when
                None, every subdirectory of data_path is used.
        """
        # Single foreground class for now.
        self.add_class("nucleis", 1, "regular")
        # TO DO : Three different image types into three classes
        if img_set is None:
            train_ids = next(os.walk(data_path))[1]
        else:
            with open(img_set) as f:
                read_data = f.readlines()
            train_ids = [read_data[i][:-1] for i in range(0,len(read_data))] # Delete New line '\n'
        # Record path and size info for each image.
        for i, id_ in enumerate(train_ids):
            file_path = os.path.join(data_path, id_)
            img_path = os.path.join(file_path, "images")
            masks_path = os.path.join(file_path, "masks")
            img_name = id_ + ".png"
            img = cv2.imread(os.path.join(img_path, img_name))
            # NOTE(review): cv2 arrays are (rows, cols, channels), so "width"
            # here actually holds the row count and "height" the column
            # count. load_mask uses the same swapped convention, so the
            # arrays still line up — confirm before renaming either side.
            width, height, _ = img.shape
            self.add_image("nucleis", image_id=id_, path=file_path,
                           img_path=img_path, masks_path=masks_path,
                           width=width, height=height,
                           nucleis="nucleis")
    def load_image(self, image_id):
        """Read and return the image (as loaded by cv2) for the given id."""
        info = self.image_info[image_id]
        img_path = info["img_path"]
        img_name = info["id"] + ".png"
        image = cv2.imread(os.path.join(img_path, img_name))
        return image
    def image_reference(self, image_id):
        """Return the on-disk path of the given image id."""
        info = self.image_info[image_id]
        if info["source"] == "nucleis":
            return info["path"]
        else:
            super(self.__class__).image_reference(self, image_id)
    def load_mask(self, image_id):
        """Load the instance masks of the given image ID.

        Returns:
            masks: uint8 array with one channel per mask file found.
            class_ids: int32 array of ones (single foreground class).
        """
        info = self.image_info[image_id]
        mask_files = next(os.walk(info["masks_path"]))[2]
        masks = np. zeros([info['width'], info['height'], len(mask_files)], dtype=np.uint8)
        for i, id_ in enumerate(mask_files):
            # Read each mask as grayscale and stack it as its own channel.
            single_mask = cv2.imread(os.path.join(info["masks_path"], id_), 0)
            masks[:, :, i:i+1] = single_mask[:, :, np.newaxis]
        class_ids = np.ones(len(mask_files))
        return masks, class_ids.astype(np.int32)
    # def test(self):
        # return "1"
def rle_encoding(x):
    """Run-length encode a binary mask.

    Pixels are numbered from 1 in column-major (Fortran) order, matching
    the Kaggle submission format. Returns a flat list alternating
    run start positions and run lengths.
    """
    flat = x.T.flatten()
    runs = []
    previous = -2
    for pixel in np.where(flat == 1)[0]:
        if pixel > previous + 1:
            # Start a new run: 1-based start index with length 0 for now.
            runs.append(pixel + 1)
            runs.append(0)
        runs[-1] += 1
        previous = pixel
    return runs
def parser_argument():
    """Parse the command line options for this script.

    Returns:
        argparse.Namespace holding the positional command plus all
        optional settings.
    """
    parser = argparse.ArgumentParser(
        description='Train Mask R-CNN on Nuclei Dataset.')
    # The positional command selects the run mode.
    parser.add_argument("command",
                        metavar="<command>",
                        help="'train' or 'evaluate' or predict")
    # (flag, keyword arguments) pairs describing every optional argument.
    optional_arguments = [
        ("--datapath",
         dict(metavar="/path/to/data/", default="./data",
              help='Directory of the Nuclei dataset')),
        ("--init_with",
         dict(metavar="/init/type", default="coco",
              help="Initialize with the (\"coco\"/\"imagenet\"/\"last\") net")),
        ("--model",
         dict(metavar="/path/to/weights.h5", default="./models/mask_rcnn_coco.h5",
              help="Path to weights .h5 file")),
        ("--ckpt",
         dict(metavar="/path/to/save/checkpoint", default="/data/lf/Nuclei/logs",
              help="Directory of the checkpoint")),
        ("--epochs",
         dict(metavar="/num/of/epochs", default="50",
              help="The number of the training epochs")),
        ("--finetune",
         dict(metavar="/finetune/type", default="heads",
              help="The type of the finetune method(\"heads\" or \"all\")")),
        ("--lr_start",
         dict(metavar="/value/of/start/lr", default="0.001", type=float,
              help="The Value of learning rate to start")),
        ("--train_dataset",
         dict(metavar="train/imgs/names", default="10-fold-train-1.txt",
              help="The training set split of the data")),
        ("--val_dataset",
         dict(metavar="val/imgs/names", default="10-fold-val-1.txt",
              help="The validation set split of the data")),
    ]
    for flag, kwargs in optional_arguments:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
if __name__ == '__main__':
    args = parser_argument()
    # One timestamped log file per run so runs don't clobber each other.
    logname = "config-" + time.strftime('%Y%m%d%H%M', time.localtime(time.time())) +".log"
    logging.basicConfig(filename=os.path.join(args.ckpt, logname), level=logging.INFO)
    logger = logging.getLogger('root')
    logger.info('\nBasic Setting:')
    logger.info('\nCommand: {} \n Initialize: {} \n Model: {} \n Datapath: {} \n Ckpt: {} \n Epochs \
        : {} \n Finetune: {} \n Train_dataset: {} \n Val_dataset: {} \n' \
        .format(args.command, args.init_with, args.model, args.datapath, args.ckpt,\
         args.epochs, args.finetune, args.train_dataset, args.val_dataset))
    # Train or evaluate or predict
    if args.command == "train":
        config = NucleiConfig()
        config.LEARNING_RATE = args.lr_start
        config.display(logger)
        model = modellib.MaskRCNN(mode="training", config=config,
                                  model_dir=args.ckpt)
        # Select weights file to load.
        print("Loading weights From ", args.model)
        if args.init_with == "imagenet":
            model.load_weights(model.get_imagenet_weights(), by_name=True)
        elif args.init_with == "coco":
            # Load weights trained on MS COCO, but skip the layers that
            # differ due to the different number of classes.
            model.load_weights(args.model, by_name=True,
                               exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
                                        "mrcnn_bbox", "mrcnn_mask"])
        elif args.init_with == "last":
            # Continue training from a previously saved checkpoint.
            model.load_weights(args.model, by_name=True)
        # Build the train/validation splits from the fold files.
        DATASET_DIR = os.path.join(args.datapath, "stage1_train_fixed")
        TRAIN_IMG_SET = os.path.join(args.datapath, "stage1_train_fixed_10fold", args.train_dataset)
        VAL_IMG_SET = os.path.join(args.datapath, "stage1_train_fixed_10fold", args.val_dataset)
        dataset_train = NucleiDataset()
        dataset_train.load_image_info(DATASET_DIR, TRAIN_IMG_SET)
        dataset_train.prepare()
        dataset_val = NucleiDataset()
        dataset_val.load_image_info(DATASET_DIR, VAL_IMG_SET)
        dataset_val.prepare()
        print("Loading {} training images, {} validation images"
              .format(len(dataset_train.image_ids), len(dataset_val.image_ids)))
        if args.finetune == "heads":
            model.train(dataset_train, dataset_val,
                        learning_rate=config.LEARNING_RATE,
                        epochs=int(args.epochs),
                        layers='heads',
                        logger=logger)
        elif args.finetune == "all":
            model.train(dataset_train, dataset_val,
                        learning_rate=config.LEARNING_RATE,
                        epochs=int(args.epochs),
                        layers='all',
                        logger=logger)
        else:
            raise NameError("Only two finetune type is vaild(\"heads\" or \"all\")")
    elif args.command == "evaluate":
        # TODO AP in [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
        class InferenceConfig(NucleiConfig):
            # Run inference one image at a time.
            # Batch size = GPU_COUNT * IMAGES_PER_GPU = 1.
            GPU_COUNT = 1
            IMAGES_PER_GPU = 1
            DETECTION_MAX_INSTANCES = 300
        config = InferenceConfig()
        config.display(logger)
        model = modellib.MaskRCNN(mode="inference", config=config,
                                  model_dir=args.ckpt)
        print("Loading weights From ", args.model)
        model.load_weights(args.model, by_name=True)
        # Fix: the parser defines --datapath (args.datapath); the original
        # referenced the non-existent args.dataset and crashed here.
        VALSET_DIR = os.path.join(args.datapath, "stage1_val")
        dataset_val = NucleiDataset()
        dataset_val.load_image_info(VALSET_DIR)
        dataset_val.prepare()
        print("Evaluate {} images".format(len(dataset_val.image_ids)))
        APs = []
        for image_id in tqdm(dataset_val.image_ids):
            # Load image and ground truth data. Fix: pass the config
            # *instance* — the original passed the InferenceConfig class,
            # which lacks the attributes computed in Config.__init__.
            image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(
                dataset_val, config, image_id, use_mini_mask=False)
            # Run object detection.
            results = model.detect([image], verbose=0)
            r = results[0]
            # Compute AP at IoU 0.5.
            AP, precisions, recalls, overlaps = utils.compute_ap(gt_bbox,
                gt_class_id, r["rois"], r["class_ids"], r["scores"], iou_threshold=0.5)
            APs.append(AP)
        print("mAP: ", np.mean(APs))
    elif args.command == "predict":
        class InferenceConfig(NucleiConfig):
            # Run inference one image at a time.
            # Batch size = GPU_COUNT * IMAGES_PER_GPU = 1.
            GPU_COUNT = 1
            IMAGES_PER_GPU = 1
            DETECTION_NMS_THRESHOLD = 0.3
            DETECTION_MAX_INSTANCES = 300
        config = InferenceConfig()
        config.display(logger)
        model = modellib.MaskRCNN(mode="inference", config=config,
                                  model_dir=args.ckpt)
        print("Loading weights From ", args.model)
        model.load_weights(args.model, by_name=True)
        # Fix: args.datapath, not the non-existent args.dataset.
        TESTSET_DIR = os.path.join(args.datapath, "stage1_test")
        dataset_test = NucleiDataset()
        dataset_test.load_image_info(TESTSET_DIR)
        dataset_test.prepare()
        print("Predict {} images".format(dataset_test.num_images))
        test_ids = []
        test_rles = []
        for image_id in tqdm(dataset_test.image_ids):
            image = dataset_test.load_image(image_id)
            id_ = dataset_test.image_info[image_id]["id"]
            results = model.detect([image], verbose=0)
            r = results[0]
            # Resolve overlapping instance masks: a pixel belongs to the
            # first instance that claimed it; later instances lose it.
            mask_exist = np.zeros(r['masks'].shape[:-1], dtype=np.uint8)
            for i in range(r['masks'].shape[-1]):
                _mask = r['masks'][:,:,i]
                overlap_index = np.where(np.multiply(mask_exist, _mask) == 1)
                _mask[overlap_index] = 0
                mask_exist += _mask
                # Only emit instances that still cover at least one pixel.
                if np.any(_mask):
                    test_ids.append(id_)
                    test_rles.append(rle_encoding(_mask))
                else :
                    continue
        sub = pd.DataFrame()
        sub['ImageId'] = test_ids
        sub['EncodedPixels'] = pd.Series(test_rles).apply(lambda x: ' '.join(str(y) for y in x))
        csvpath = "{}.csv".format(args.model)
        print("Writing the Result in {}".format(csvpath))
        sub.to_csv(csvpath, index=False)
    else:
        print("'{}' is not recognized. Use 'train' 'evaluate' 'predict'".format(args.command))
| true | true |
f71ed1a57e43f0540a74700d7289f51d9537ec4f | 39,787 | py | Python | ankify_roam/roam/content.py | taylormitchell/ankify_roam | bd058e3bf086a300680a14cdfc1079d83ed8d79f | [
"MIT"
] | 37 | 2020-07-03T01:59:13.000Z | 2022-03-24T04:23:43.000Z | ankify_roam/roam/content.py | taylormitchell/ankify_roam | bd058e3bf086a300680a14cdfc1079d83ed8d79f | [
"MIT"
] | 33 | 2020-07-05T02:09:22.000Z | 2022-03-23T01:51:56.000Z | ankify_roam/roam/content.py | taylormitchell/ankify_roam | bd058e3bf086a300680a14cdfc1079d83ed8d79f | [
"MIT"
] | 6 | 2020-07-05T07:10:33.000Z | 2022-03-10T21:06:53.000Z | import os
import re
import logging
from functools import reduce
from itertools import zip_longest
from collections.abc import Iterable
import html
logger = logging.getLogger(__name__)
RE_SPLIT_OR = "(?<!\\\)\|"
class BlockContent(list):
    """The parsed contents of a Roam block: a list of BlockContentItem
    objects (strings, page refs, clozes, images, etc.) in source order."""
    def __init__(self, roam_objects=[]):
        """
        Args:
            roam_objects (List of BlockContentItem): items to wrap; bare
                str/int/float values are coerced to String items.
        """
        if type(roam_objects) not in [list, BlockContent]:
            roam_objects = [roam_objects]
        for obj in roam_objects:
            if type(obj) in [str, int, float]:
                obj = String(str(obj))
            elif isinstance(obj, BlockContentItem):
                pass
            else:
                raise ValueError(f"roam_objects can't contain {type(obj)} type objects")
            self.append(obj)
    @classmethod
    def find_and_replace(cls, obj, skip=[], *args, **kwargs):
        """Parse `obj` into typed roam objects.

        Each type's find_and_replace runs in list order, so earlier entries
        (e.g. CodeBlock) claim their text before later ones (e.g. PageRef)
        can see it — the ordering here matters.

        Args:
            obj: a string or BlockContent to parse.
            skip: roam object types to leave unparsed.
        """
        roam_object_types = [
            BlockQuote,
            CodeBlock,
            CodeInline,
            Cloze,
            Image,
            Alias,
            Checkbox,
            Embed,
            View,
            Button,
            PageTag,
            PageRef,
            BlockRef,
            Attribute,
            #Url, #TODO: don't have a good regex for this right now
        ]
        roam_object_types = [o for o in roam_object_types if o not in skip]
        roam_objects = BlockContent(obj)
        for rm_obj_type in roam_object_types:
            roam_objects = rm_obj_type.find_and_replace(roam_objects, *args, **kwargs)
        return cls(roam_objects)
    @classmethod
    def from_string(cls, string, *args, **kwargs):
        """Parse a raw Roam block string into a BlockContent."""
        return cls.find_and_replace(string, *args, **kwargs)
    def get_tags(self):
        """Return the deduplicated tags referenced by any item."""
        tags = []
        for obj in self:
            tags += obj.get_tags()
        return list(set(tags))
    def to_string(self):
        """Reassemble the original Roam markup."""
        return "".join([o.to_string() for o in self])
    def to_html(self, *args, **kwargs):
        """Render to HTML; emphasis markup (`, **, __, ^^) is converted last
        so it can span the rendered output of multiple items."""
        # TODO: implement filters
        res = "".join([o.to_html(*args, **kwargs) for o in self])
        res = self._all_emphasis_to_html(res)
        return res
    def is_single_pageref(self):
        """True if the content is exactly one PageRef and nothing else."""
        return len(self)==1 and type(self[0])==PageRef
    def get_strings(self):
        """Return only the plain String items."""
        return [o for o in self if type(o)==String]
    @staticmethod
    def _get_emphasis_locs(string, emphasis):
        """Find (start, end) index pairs of paired emphasis markers.

        `start` is the index of the opening marker's first char; `end` is
        the index of the closing marker's last char. A trailing unmatched
        opening marker is ignored.
        """
        emphasis_locs = []
        emphasis_start = emphasis_end = None
        for i,c in enumerate(string):
            if emphasis_start is None and string[i:i+len(emphasis)] == emphasis:
                emphasis_start = i
                continue
            if emphasis_end is None and string[i:i+len(emphasis)] == emphasis:
                emphasis_end = i + (len(emphasis)-1)
                emphasis_locs.append((emphasis_start, emphasis_end))
                emphasis_start = emphasis_end = None
        return emphasis_locs
    def _emphasis_to_html(self, string, emphasis, html_left, html_right):
        """Replace each paired `emphasis` marker with the given HTML tags."""
        emphasis_locs = self._get_emphasis_locs(string, emphasis)
        # `diff` tracks how much the string has grown/shrunk so far, so the
        # original indices can be shifted to their current positions.
        diff = 0
        for (i, j) in emphasis_locs:
            i, j = i + diff, j + diff
            string = string[:i] + html_left + string[i+len(emphasis):j-len(emphasis)+1] + html_right + string[j+1:]
            diff += len(html_left+html_right) - len(emphasis+emphasis)
        return string
    def _all_emphasis_to_html(self, string):
        """Convert every supported emphasis markup to its HTML equivalent."""
        string = self._emphasis_to_html(string, emphasis="`", html_left="<code>", html_right="</code>")
        string = self._emphasis_to_html(string, emphasis="**", html_left="<b>", html_right="</b>")
        string = self._emphasis_to_html(string, emphasis="__", html_left="<em>", html_right="</em>")
        string = self._emphasis_to_html(string, emphasis="^^", html_left='<span class="roam-highlight">', html_right='</span>')
        return string
    def __repr__(self):
        return "<%s(%s)>" % (
            self.__class__.__name__, repr(list(self)))
    def get_contents(self, recursive=False):
        """Return child items; when recursive, include nested contents too."""
        if not recursive:
            return list(self)
        else:
            items = []
            for item in self:
                items += [item]
                items += item.get_contents()
            return items
    def merge_adjacent_strings(self):
        """Collapse consecutive String items into a single String, in place."""
        i = 0
        while i + 1 < len(self):
            if type(self[i]) == String and type(self[i+1]) == String:
                self[i].string += self[i+1].string
                del self[i+1]
            else:
                i += 1
class BlockContentItem:
    """Abstract base for anything that can appear inside a Roam block
    (plain strings, page refs, clozes, images, etc.)."""
    @classmethod
    def from_string(cls, string, validate=True):
        """Validate `string` against this type's pattern; subclasses extend
        this to actually construct an instance."""
        if validate and not cls.validate_string(string):
            raise ValueError(f"Invalid string '{string}' for {cls.__name__}")
    @classmethod
    def validate_string(cls, string):
        """True if the entire string matches one of this type's patterns."""
        pat = cls.create_pattern(string)
        # Anchor each top-level alternative separately so the whole string
        # (not just a substring) must match one of them.
        pat = "|".join([f"^{p}$" for p in re.split(RE_SPLIT_OR, pat)])
        if re.match(re.compile(pat), string):
            return True
        return False
    def to_string(self):
        """Convert back to Roam markup. Subclasses must implement."""
        raise NotImplementedError
    def to_html(self, *args, **kwargs):
        """Render as HTML; defaults to the raw string."""
        return self.string
    def get_tags(self):
        """Tags referenced by this item (none by default)."""
        return []
    def get_contents(self):
        """Nested child items (none by default)."""
        return []
    @classmethod
    def _find_and_replace(cls, string, *args, **kwargs):
        "See the find_and_replace method"
        pat = cls.create_pattern(string)
        if not pat:
            return [String(string)]
        roam_objects = [cls.from_string(s, validate=False, *args, **kwargs) for s in re.findall(pat, string)]
        string_split = [String(s) for s in re.split(pat, string)]
        # Weave strings and roam objects together, dropping empty pieces.
        roam_objects = [a for b in zip_longest(string_split, roam_objects) for a in b if a]
        roam_objects = [o for o in roam_objects if o.to_string()]
        return roam_objects
    @classmethod
    def find_and_replace(cls, string, *args, **kwargs):
        """Replace all substring representations of this object with this object

        Args:
            string (str or sequence of BlockContentItem)

        Returns:
            BlockContent: A sequence of String and this object type.
        """
        if type(string)==str:
            roam_objects = BlockContent([String(string)])
        elif type(string)==BlockContent:
            roam_objects = string
        else:
            raise ValueError(f"'{type(string)}' is an invalid type for `string`")
        new_roam_objects = []
        for obj in roam_objects:
            if type(obj)==String:
                # Only plain strings are re-parsed; already-typed objects
                # pass through untouched.
                new_roam_objects += cls._find_and_replace(obj.to_string(), *args, **kwargs)
            else:
                new_roam_objects += [obj]
        roam_objects = new_roam_objects
        return BlockContent(roam_objects)
    def __repr__(self):
        return "<%s(string='%s')>" % (
            self.__class__.__name__, self.to_string())
    def __eq__(self, b):
        return self.to_string()==b.to_string()
class BlockQuote(BlockContentItem):
    """A quoted block: a '>' marker followed by arbitrary block content."""
    def __init__(self, block_content, prefix="> "):
        self.block_content = block_content
        self.prefix = prefix
    @classmethod
    def from_string(cls, string, validate=True, **kwargs):
        super().from_string(string, validate)
        # Separate the leading '>' (plus optional whitespace) from the body.
        marker, body = re.match("^(>\s?)([\w\W]*)$", string).groups()
        return cls(BlockContent.from_string(body, **kwargs), prefix=marker)
    def to_string(self):
        return self.prefix + self.block_content.to_string()
    def to_html(self, *args, **kwargs):
        inner = self.block_content.to_html(*args, **kwargs)
        return '<blockquote class="rm-bq">%s</blockquote>' % inner
    def get_tags(self):
        return self.block_content.get_tags()
    def get_contents(self):
        return self.block_content.get_contents()
    @classmethod
    def create_pattern(cls, string=None):
        return "^>[\w\W]*$"
    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self.block_content.to_string() == other.block_content.to_string()
class ClozeLeftBracket(BlockContentItem):
    """Opening bracket of a cloze deletion.

    Recognized forms:
    - {
    - {1
    - {1:
    - {c1:
    - [[{c1:]]
    """
    def __init__(self, id=None, enclosed=False, c=False, sep=""):
        self.id = id                # cloze number, or None when unnumbered
        self.enclosed = enclosed    # True when wrapped in [[ ]]
        self.c = c                  # True when written with a 'c' prefix (e.g. {c1:)
        self.sep = sep              # separator after the id: ':', '|', or ''
    @classmethod
    def _find_and_replace(cls, string):
        # Split `string` into String and ClozeLeftBracket objects.
        pats = [
            "\[\[{c?\d*[:|]?\]\]", # [[{]] or [[{c1:]]
            "(?<!{){c?\d+[:|]", # {1 or {c1:
            "(?<!{){(?!{)" # { (but not {{, which opens a button/view)
        ]
        matches = list(re.finditer("|".join(pats), string))
        if not matches:
            return [String(string)]
        objs = []
        last_cloze_end = 0
        for match in matches:
            # Create cloze bracket from the matched text
            text = match.group(0)
            c = "c" in text
            enclosed = text.startswith("[[")
            m = re.search("\d+", text)
            id = int(m.group(0)) if m else None
            if ":" in text:
                sep = ":"
            elif "|" in text:
                sep = "|"
            else:
                sep = ""
            # Split string and replace with objects
            objs.append(String(string[last_cloze_end:match.start()]))
            objs.append(cls(id, enclosed, c, sep))
            last_cloze_end = match.end()
        if last_cloze_end != len(string):
            objs.append(String(string[last_cloze_end:]))
        return BlockContent(objs)
    def to_string(self):
        # Reconstruct the Roam markup for this bracket.
        res = "{"
        if self.c:
            res += "c"
        if self.id:
            res += str(self.id)
        if self.sep:
            res += self.sep
        if self.enclosed:
            res = "[[" + res + "]]"
        return res
    def to_html(self):
        # Anki-style opening cloze marker.
        return "{{c" + str(self.id) + "::"
    def __repr__(self):
        return "<%s(string='%s')>" % (
            self.__class__.__name__, self.to_string())
class ClozeRightBracket(BlockContentItem):
    """Closing bracket of a cloze deletion.

    Recognized forms:
    - [[::hint}]]
    - [[}]]
    - [[::hint]]}
    - }
    - ::hint}
    """
    def __init__(self, enclosed=False, hint=None, string=None):
        self.enclosed = enclosed    # True when wrapped in [[ ]]
        self.hint = hint            # ClozeHint attached to this bracket, if any
        self.string = string
    @classmethod
    def _find_and_replace(cls, string):
        # Bracketed forms come first so they aren't split by the bare forms.
        pats = [
            "\[\[(?:::[^}\[]*)?}\]\]", # [[}]] or [[::hint}]]
            "\[\[(?:::[^}\[]*)\]\]}", # [[::hint]]}
            "(?:::[^}\[]*)}(?!})", # ::hint}
            "(?<!})}(?!})", # }
        ]
        # Bug fix: materialize the iterator. re.finditer returns an iterator,
        # which is always truthy, so the emptiness check below was dead code.
        # (ClozeLeftBracket._find_and_replace already does this.)
        matches = list(re.finditer("|".join(pats), string))
        if not matches:
            return [String(string)]
        objs = []
        last_cloze_end = 0
        for match in matches:
            text = match.group(0)
            # [[}]] or [[::hint}]]
            if text.startswith("[[") and text.endswith("]]"):
                hint = ClozeHint(re.sub("[\[\]}]", "", text)[2:]) if "::" in text else None
                enclosed = True
            # [[::hint]]}
            elif text.startswith("[[") and text.endswith("}"):
                hint = ClozeHint(re.sub("[\[\]}]", "", text)[2:], enclosed=True)
                enclosed = False
            # } or ::hint}
            else:
                hint = ClozeHint(re.sub("[\[\]}]", "", text)[2:]) if "::" in text else None
                enclosed = False
            # Split string and replace with objects
            objs.append(String(string[last_cloze_end:match.start()]))
            objs.append(cls(enclosed, hint=hint))
            last_cloze_end = match.end()
        if last_cloze_end != len(string):
            objs.append(String(string[last_cloze_end:]))
        return BlockContent(objs)
    def to_string(self):
        # A hint attached to the bracket is serialized just before the '}'.
        res = "}"
        if self.hint:
            res = self.hint.to_string() + res
        if self.enclosed:
            res = "[[" + res + "]]"
        return res
    def to_html(self):
        # Anki-style closing cloze marker, with the hint inlined if present.
        if self.hint:
            return self.hint.to_html() + "}}"
        return "}}"
    def __repr__(self):
        return "<%s(string='%s')>" % (
            self.__class__.__name__, self.to_string())
class ClozeHint(BlockContentItem):
    """A cloze hint, e.g. the '::hint' part of the forms below.

    - {something::hint}
    - {something[[::hint]]}
    - [[{]]something::hint[[}]]
    - [[{]]something[[::hint}]]
    """
    def __init__(self, text, enclosed=False):
        self.text = text            # hint text without the '::' prefix
        self.enclosed = enclosed    # True when wrapped in [[ ]]
    @classmethod
    def from_string(cls, hint):
        # NOTE(review): only handles the bare '::hint' form; the bracketed
        # '[[::hint]]' form is handled in _find_and_replace — confirm callers.
        return cls(hint[2:])
    @classmethod
    def _find_and_replace(cls, string):
        # Bracketed form first so it isn't split by the bare form.
        pats = [
            "\[\[::[^\]]*\]\]",
            "::[^}\[]*"
        ]
        # Bug fix: materialize the iterator. re.finditer returns an iterator,
        # which is always truthy, so the emptiness check below was dead code.
        # (ClozeLeftBracket._find_and_replace already does this.)
        matches = list(re.finditer("|".join(pats), string))
        if not matches:
            return BlockContent(string)
        objs = []
        last_cloze_end = 0
        for match in matches:
            text = match.group(0)
            if text.startswith("[["):
                enclosed = True
                text = text[2:-2] # remove surrounding brackets
            else:
                enclosed = False
            text = text[2:] # remove '::' prefix
            objs.append(String(string[last_cloze_end:match.start()]))
            objs.append(cls(text, enclosed))
            last_cloze_end = match.end()
        if last_cloze_end != len(string):
            objs.append(String(string[last_cloze_end:]))
        return BlockContent(objs)
    def to_string(self):
        res = "::" + str(self.text)
        if self.enclosed:
            res = "[[" + res + "]]"
        return res
    def to_html(self):
        return "::" + str(self.text)
class Cloze(BlockContentItem):
    """A cloze deletion, e.g. '{c1:answer::hint}' or '[[{c1:]]answer[[}]]'.

    Composed of a ClozeLeftBracket, inner BlockContent, an optional
    ClozeHint, and a ClozeRightBracket.
    """
    def __init__(self, inner:BlockContent="", left_bracket:ClozeLeftBracket=None, right_bracket:ClozeRightBracket=None,
                 hint:ClozeHint=None, id=1, c=True, sep=":", enclosed=False, string=None, roam_db=None):
        self.inner = BlockContent(inner)
        self.left_bracket = left_bracket or ClozeLeftBracket(id=id, c=c, enclosed=enclosed, sep=sep)
        self.right_bracket = right_bracket or ClozeRightBracket(enclosed=enclosed)
        # A hint may come attached to the right bracket or passed directly,
        # but not both.
        if self.right_bracket.hint and hint:
            raise ValueError("Only allowed one hint")
        if type(hint) == str:
            hint = ClozeHint(hint)
        self._hint = hint
        self.string = string
        self.roam_db = roam_db
    @property
    def hint(self):
        # Directly-attached hint wins; otherwise use the bracket's hint.
        return self._hint or self.right_bracket.hint
    @property
    def id(self):
        return self.left_bracket.id if self.left_bracket else None
    @id.setter
    def id(self, id):
        self.left_bracket.id = id
    @classmethod
    def from_string(cls, string, validate=True, **kwargs):
        """Parse `string`, which must consist of exactly one cloze."""
        objs = cls.find_and_replace(string)
        if len(objs) != 1 or type(objs[0]) != cls:
            raise ValueError(f"Invalid string '{string}' for {cls.__name__}")
        return objs[0]
    @classmethod
    def find_and_replace(cls, string, *args, **kwargs):
        """Replace cloze markup in `string` with Cloze objects.

        Brackets and hints are parsed individually, then paired up into
        Cloze objects; leftovers which couldn't be paired are converted
        back into plain Strings.
        """
        objs = BlockContent(string)
        objs = ClozeLeftBracket.find_and_replace(objs)
        objs = ClozeRightBracket.find_and_replace(objs)
        objs = ClozeHint.find_and_replace(objs)
        res = []
        next_idx = 0
        left_idx = right_idx = None
        for i, obj in enumerate(objs):
            # Left cloze bracket
            if right_idx is None and type(obj) == ClozeLeftBracket:
                res += objs[next_idx:i]
                next_idx = left_idx = i
            # Right cloze bracket matched to previous left bracket
            elif left_idx is not None and type(obj) == ClozeRightBracket:
                inner = objs[left_idx+1:i]
                hint = None
                # Bug fix: guard against an empty inner span (e.g. '{}')
                # before inspecting its last item (previously IndexError).
                if inner and type(inner[-1]) == ClozeHint:
                    inner, hint = inner[:-1], inner[-1]
                inner = BlockContent.find_and_replace(inner)
                cloze = cls(inner=inner, left_bracket=objs[left_idx], right_bracket=obj, hint=hint)
                res.append(cloze)
                left_idx = right_idx = None
                next_idx = i+1
            # Left bracket after an unmatched left bracket
            elif left_idx is not None and type(obj) == ClozeLeftBracket:
                res += objs[left_idx:i]
                next_idx = left_idx = i
            # Right bracket after an unmatched right bracket
            elif right_idx is not None and type(obj) == ClozeRightBracket:
                res += objs[right_idx:i]
                next_idx = right_idx = i
        res += objs[next_idx:]
        # Remove any cloze brackets or hints which weren't matched up
        for i, obj in enumerate(res):
            if type(obj) in [ClozeLeftBracket, ClozeRightBracket, ClozeHint]:
                res[i] = String(obj.to_string())
        cls._assign_cloze_ids([o for o in res if type(o)==Cloze])
        bc = BlockContent(res)
        bc.merge_adjacent_strings()
        return bc
    def get_tags(self):
        return self.inner.get_tags()
    def to_string(self, style="anki"):
        """Serialize the cloze.

        Args:
            style (string): {'anki','roam'}
        """
        if style=="anki":
            return "{{c%s::%s%s}}" % (self.id, self.inner.to_string(), self.hint.to_string() if self.hint else "")
        elif style=="roam":
            res = ""
            for o in [self.left_bracket, self.inner, self._hint, self.right_bracket]:
                res += o.to_string() if o else ""
            return res
        else:
            raise ValueError(f"style='{style}' is an invalid. "\
                "Must be 'anki' or 'roam'")
    def to_html(self, *args, **kwargs):
        """
        Args:
            pageref_cloze (str): {'outside', 'inside', 'base_only'}
        """
        kwargs['roam_db'] = self.roam_db
        proc_cloze = kwargs.get("proc_cloze", True)
        pageref_cloze = kwargs.get("pageref_cloze", "outside")
        if not proc_cloze:
            # Render the markup as ordinary content rather than an Anki cloze.
            bc = BlockContent.find_and_replace(self.to_string("roam"), skip=[Cloze])
            return bc.to_html(*args, **kwargs)
        # Fancy options to move around the cloze when it's only around a PageRef
        if self.inner.is_single_pageref() and self.hint is None:
            pageref = self.inner[0]
            if pageref_cloze=="outside":
                content = pageref.to_html()
                return Cloze(id=self.id, inner=content, hint=self.hint).to_string()
            elif pageref_cloze=="inside":
                clozed_title = Cloze(id=self.id, inner=pageref.title, hint=self.hint).to_string()
                return pageref.to_html(title=clozed_title)
            elif pageref_cloze=="base_only":
                # Cloze only the last path component; keep the namespace outside.
                clozed_base = Cloze(id=self.id, inner=pageref.get_basename(), hint=self.hint).to_string()
                namespace = pageref.get_namespace()
                if namespace:
                    clozed_base = namespace + "/" + clozed_base
                return pageref.to_html(title=clozed_base)
            else:
                raise ValueError(f"{pageref_cloze} is an invalid option for `pageref_cloze`")
        res = ""
        for o in [self.left_bracket, self.inner, self._hint, self.right_bracket]:
            res += o.to_html() if o else ""
        return res
    @staticmethod
    def _assign_cloze_ids(clozes):
        # Give every unnumbered cloze the lowest unused positive id.
        assigned_ids = [c.id for c in clozes if c.id]
        next_id = 1
        for cloze in clozes:
            if cloze.id: continue
            while next_id in assigned_ids:
                next_id += 1
            assigned_ids += [next_id]
            cloze.id = next_id
    def __repr__(self):
        string = self.string or self.to_string(style="roam")
        return "<%s(id=%s, string='%s')>" % (
            self.__class__.__name__, self.id, string)
    def __eq__(self, other):
        return type(self)==type(other) and self.inner == other.inner
class Image(BlockContentItem):
    """Markdown-style inline image: ''."""
    def __init__(self, src, alt="", string=None):
        self.src = src
        self.alt = alt
        self.string = string
    @classmethod
    def from_string(cls, string, validate=True, **kwargs):
        super().from_string(string, validate)
        match = re.search("!\[([^\[\]]*)\]\(([^\)\n]+)\)", string)
        alt_text, src_url = match.groups()
        return cls(src_url, alt_text)
    @classmethod
    def create_pattern(cls, string=None):
        return r"!\[[^\[\]]*\]\([^\)\n]+\)"
    def to_string(self):
        if self.string:
            return self.string
        return f""
    def to_html(self, *arg, **kwargs):
        src = html.escape(self.src)
        alt = html.escape(self.alt)
        return f'<img src="{src}" alt="{alt}" draggable="false" class="rm-inline-img">'
    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self.src == other.src and self.alt == other.alt
class Alias(BlockContentItem):
    """A markdown-style alias '[text](destination)' whose destination is a
    page reference, a block reference, or a plain URL."""
    def __init__(self, alias, destination, string=None):
        self.alias = alias
        self.destination = destination  # PageRef, BlockRef, or String (url)
        self.string = string
    @classmethod
    def from_string(cls, string, validate=True, **kwargs):
        super().from_string(string, validate)
        alias, destination = re.search(r"^\[([^\[\]]+)\]\(([\W\w]+)\)$", string).groups()
        # Dispatch on the destination's markup form.
        if re.match("^\[\[.*\]\]$", destination):
            destination = PageRef.from_string(destination)
        elif re.match("^\(\(.*\)\)$", destination):
            roam_db = kwargs.get("roam_db", None)
            destination = BlockRef.from_string(destination, roam_db=roam_db)
        else:
            # TODO: should this be a Url object?
            destination = String(destination)
        return cls(alias, destination, string)
    def to_string(self):
        if self.string:
            return self.string
        return f"[{self.alias}]({self.destination.to_string()})"
    def to_html(self, *arg, **kwargs):
        # Render differently depending on what the alias points at.
        if type(self.destination)==PageRef:
            return '<a title="page: %s" class="rm-alias rm-alias-page">%s</a>' % (
                html.escape(self.destination.title), html.escape(self.alias))
        elif type(self.destination)==BlockRef:
            return '<a title="block: %s" class="rm-alias rm-alias-block">%s</a>' % (
                html.escape(self.destination.to_string(expand=True)), html.escape(self.alias))
        else:
            return '<a title="url: {0}" class="rm-alias rm-alias-external" href="{0}">{1}</a>'.format(
                html.escape(self.destination.to_string()), html.escape(self.alias))
    def get_tags(self):
        return self.destination.get_tags()
    def get_contents(self):
        return self.destination.get_contents()
    @classmethod
    def create_pattern(cls, string=None):
        # Build one alternative per possible destination type found in `string`.
        re_template = r"\[[^\[\]]+\]\(%s\)"
        destination_pats = []
        for o in [PageRef, BlockRef]:
            dest_pat = o.create_pattern(string)
            destination_pats += re.split(RE_SPLIT_OR, dest_pat) if dest_pat else []
        destination_pats.append("[^\(\)\[\]]+") # TODO: replace this with a real url regex
        return "|".join([re_template % pat for pat in destination_pats])
    def __eq__(self, other):
        return type(self)==type(other) and self.alias==other.alias and other.destination==other.destination
class CodeBlock(BlockContentItem):
    """A fenced code block: '```lang\\ncode```' or '```code```'."""
    def __init__(self, code, language=None, string=None):
        self.code = code
        self.language = language  # None when no recognized language tag
        self.string = string
    @classmethod
    def from_string(cls, string, **kwargs):
        super().from_string(string)
        # Languages accepted after the opening fence.
        # NOTE(review): "isx" looks like it may be a typo for "jsx" — confirm
        # against Roam's language list before changing.
        supported_languages = [
            "clojure", "css", "elixir", "html", "plain text", "python", "ruby",
            "swift", "typescript", "isx", "yaml", "rust", "shell", "php", "java",
            "c#", "c++", "objective-c", "kotlin", "sql", "haskell", "scala",
            "common lisp", "julia", "sparql", "turtle", "javascript"]
        pat_lang = "^```(%s)\n" % "|".join([re.escape(l) for l in supported_languages])
        match_lang = re.search(pat_lang, string)
        if match_lang:
            language = match_lang.group(1)
            pat = re.compile(f"```{language}\n([^`]*)```")
        else:
            # No recognized language tag; everything inside the fences is code.
            language = None
            pat = re.compile("```([^`]*)```")
        code = re.search(pat, string).group(1)
        return cls(code, language, string)
    @classmethod
    def create_pattern(cls, string=None):
        return f"```[^`]*```"
    def to_string(self):
        if self.string: return self.string
        if self.language:
            return f'```{self.language}\n{self.code}```'
        else:
            return f'```{self.code}```'
    def to_html(self, *args, **kwargs):
        code = html.escape(self.code)
        return f'<pre><code>{code}</code></pre>'
    def __eq__(self, other):
        return type(self)==type(other) and self.language==other.language and self.code==other.code
class CodeInline(BlockContentItem):
    """Inline code delimited by single backticks."""
    def __init__(self, code, string=None):
        self.code = code
        self.string = string
    @classmethod
    def from_string(cls, string, **kwargs):
        super().from_string(string)
        match = re.search("`([^`]*)`", string)
        return cls(match.group(1), string)
    @classmethod
    def create_pattern(cls, string=None):
        return "`[^`]*`"
    def to_string(self):
        if self.string:
            return self.string
        return f'`{self.code}`'
    def to_html(self, *args, **kwargs):
        return '<code>%s</code>' % html.escape(self.code)
    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self.code == other.code
class Checkbox(BlockContentItem):
    """A TODO/DONE checkbox: '{{[[TODO]]}}' or '{{[[DONE]]}}'."""
    def __init__(self, checked=False):
        self.checked = checked
    @classmethod
    def from_string(cls, string, validate=True, **kwargs):
        super().from_string(string, validate)
        return cls(checked="DONE" in string)
    @classmethod
    def create_pattern(cls, string=None):
        return "|".join([re.escape("{{[[TODO]]}}"), re.escape("{{[[DONE]]}}")])
    def get_tags(self):
        if self.checked:
            return ["DONE"]
        return ["TODO"]
    def to_string(self):
        if self.checked:
            return "{{[[DONE]]}}"
        return "{{[[TODO]]}}"
    def to_html(self, *arg, **kwargs):
        checked_attr = ' checked=""' if self.checked else ''
        return ('<span><label class="check-container">'
                '<input type="checkbox"%s>'
                '<span class="checkmark"></span></label></span>') % checked_attr
    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self.checked == other.checked
class View(BlockContentItem):
    """A Roam view component, e.g. '{{query: ...}}', '{{youtube: ...}}', or
    '{{mentions: ...}}'; the name may also be bracketed ('{{[[query]]: ...}}')."""
    def __init__(self, name: BlockContentItem, text, string=None):
        if type(name)==str:
            name = String(name)
        self.name = name    # String or PageRef naming the view
        self.text = text    # raw text after the ':'
        self.string = string
    @classmethod
    def from_string(cls, string, validate=True, **kwargs):
        super().from_string(string, validate)
        name, text = re.search("{{([^:]*):(.*)}}", string).groups()
        if re.match("^\[\[.*\]\]$", name):
            name = PageRef.from_string(name)
        else:
            name = String(name)
        return cls(name, text, string)
    def to_html(self, *arg, **kwargs):
        # Views are not rendered; just show their escaped text.
        return html.escape(self.text)
    def get_tags(self):
        return self.name.get_tags()
    def get_contents(self):
        return self.name.get_contents()
    @classmethod
    def create_pattern(cls, strings=None):
        # One alternative per supported view name, bare and bracketed.
        re_template = "{{%s:.*}}"
        pats = []
        for view in ["youtube", "query", "mentions"]:
            pats.append(re_template % view)
            pats.append(re_template % re.escape(f"[[{view}]]"))
        return "|".join(pats)
    def to_string(self):
        if self.string:
            return self.string
        return "{{%s:%s}}" % (self.name.to_string(), self.text)
    def __eq__(self, other):
        return type(self)==type(other) and self.name==other.name and self.text==other.text
class Embed(BlockContentItem):
    """A block embed: '{{embed: ((uid))}}' or '{{[[embed]]: ((uid))}}'."""
    def __init__(self, name: BlockContentItem, blockref, string=None):
        if type(name)==str:
            name = String(name)
        self.name = name        # String or PageRef naming the component
        self.blockref = blockref  # BlockRef of the embedded block
        self.string = string
    @classmethod
    def from_string(cls, string, validate=True, **kwargs):
        super().from_string(string, validate)
        name, blockref = re.search("{{([^:]*):\s*([^\s]*)\s*}}", string).groups()
        if re.match("^\[\[.*\]\]$", name):
            name = PageRef.from_string(name)
        else:
            name = String(name)
        blockref = BlockRef.from_string(blockref, **kwargs)
        return cls(name, blockref, string)
    def to_html(self, *arg, **kwargs):
        # Render the referenced block (with its children) when it can be
        # resolved; otherwise fall back to rendering the reference itself.
        block = self.blockref.get_referenced_block()
        if block:
            inner_html = block.to_html(children=True, *arg, **kwargs)
        else:
            inner_html = self.blockref.to_html(*arg, **kwargs)
        return '<div class="rm-embed-container">' + \
            inner_html + \
            '</div>'
    def get_tags(self):
        return self.name.get_tags()
    def get_contents(self):
        return self.name.get_contents()
    @classmethod
    def create_pattern(cls, strings=None):
        pats = []
        pats.append("{{embed:\s*%s\s*}}" % BlockRef.create_pattern())
        pats.append("{{\[\[embed\]\]:\s*%s\s*}}" % BlockRef.create_pattern())
        return "|".join(pats)
    def to_string(self):
        if self.string:
            return self.string
        return "{{%s:%s}}" % (self.name.to_string(), self.blockref.to_string())
    def __eq__(self, other):
        return type(self)==type(other) and self.name==other.name and self.blockref==other.blockref
class Button(BlockContentItem):
    """A Roam button: '{{name}}' or '{{name:text}}'."""
    def __init__(self, name, text="", string=None):
        self.name = name
        self.text = text
        self.string = string
    @classmethod
    def from_string(cls, string, validate=True, **kwargs):
        super().from_string(string, validate)
        # Strip the surrounding '{{' and '}}'.
        inner = string[2:-2]
        if ":" not in inner:
            return cls(inner, "", string)
        name, text = re.search(r"([^:]*):(.*)", inner).groups()
        return cls(name, text, string)
    def get_tags(self):
        return BlockContent.from_string(self.text).get_tags()
    def get_contents(self):
        return BlockContent.from_string(self.text).get_contents()
    def to_string(self):
        if self.string:
            return self.string
        if self.text:
            return "{{%s:%s}}" % (self.name, self.text)
        return "{{%s}}" % self.name
    def to_html(self, *arg, **kwargs):
        label = html.escape(self.name)
        return '<button class="bp3-button bp3-small dont-focus-block">%s</button>' % label
    @classmethod
    def create_pattern(cls, string=None):
        return "{{.(?:(?<!{{).)*}}"
    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self.name == other.name and self.text == other.text
class PageRef(BlockContentItem):
    """A page reference: '[[Page Title]]' (titles may nest further refs)."""
    def __init__(self, title, uid="", string=None):
        """
        Args:
            title (str or BlockContent)
        """
        if type(title)==str: title = PageRef.find_and_replace(title)
        self._title = title   # BlockContent; may contain nested PageRefs
        self.uid = uid
        self.string = string
    @property
    def title(self):
        return self._title.to_string()
    @classmethod
    def from_string(cls, string, validate=True, **kwargs):
        super().from_string(string, validate)
        # Parse nested page refs inside the brackets.
        roam_objects = PageRef.find_and_replace(string[2:-2])
        return cls(roam_objects, string=string)
    @classmethod
    def create_pattern(cls, string, groups=False):
        """Return a pattern matching the page refs present in `string`,
        or None when it contains none. With `groups=True` the brackets and
        title are captured separately."""
        page_refs = PageRef.extract_page_ref_strings(string)
        if not page_refs:
            return None
        if groups:
            titles = [re.escape(p[2:-2]) for p in page_refs]
            return "|".join([f"(\[\[)({t})(\]\])" for t in titles])
        else:
            return "|".join([re.escape(p) for p in page_refs])
    def get_tags(self):
        # This page's title plus any tags nested inside it.
        tags_in_title = [o.get_tags() for o in self._title]
        tags_in_title = list(set(reduce(lambda x,y: x+y, tags_in_title)))
        return [self.title] + tags_in_title
    def get_contents(self):
        items = []
        for item in self._title:
            items += item.get_contents()
        return items
    def get_namespace(self):
        # Everything before the last '/' in the title.
        return os.path.split(self.title)[0]
    def get_basename(self):
        # Everything after the last '/' in the title.
        return os.path.split(self.title)[1]
    def to_string(self):
        if self.string: return self.string
        return f"[[{self.title}]]"
    def to_html(self, title=None, *args, **kwargs):
        # `title` overrides the rendered title text (used by Cloze.to_html).
        if title:
            title_html = title
        elif set([type(o) for o in self._title]) == set([String]):
            # Plain-text title: split namespace from name for styling.
            title = html.escape(self._title.to_string())
            title_split = title.split("/")
            if len(title_split) == 1:
                title_html = title
            else:
                namespace, name = "/".join(title_split[:-1]) + "/", title_split[-1]
                title_html = \
                    f'<span class="rm-page-ref-namespace">{namespace}</span>'\
                    f'<span class="rm-page-ref-name">{name}</span>'
        else:
            # Title contains nested objects; render each one.
            title_html = "".join([o.to_html() for o in self._title])
        uid_attr = f' data-link-uid="{self.uid}"' if self.uid else ''
        return \
            f'<span data-link-title="{html.escape(self.title)}"{uid_attr}>'\
            f'<span class="rm-page-ref-brackets">[[</span>'\
            f'<span class="rm-page-ref rm-page-ref-link-color">{title_html}</span>'\
            f'<span class="rm-page-ref-brackets">]]</span>'\
            f'</span>'
    @staticmethod
    def extract_page_ref_strings(string):
        """Return the top-level '[[...]]' substrings of `string`, keeping
        nested brackets balanced (e.g. '[[a [[b]]]]' is one ref)."""
        # https://stackoverflow.com/questions/524548/regular-expression-to-detect-semi-colon-terminated-c-for-while-loops/524624#524624
        bracket_count = 0
        pages = []
        page = ""
        prev_char = ""
        for j,c in enumerate(string):
            # Track page opening and closing
            if prev_char+c == "[[":
                if not page:
                    page = string[j-1]
                bracket_count += 1
                prev_char = ""
            elif prev_char+c == "]]":
                bracket_count -= 1
                prev_char = ""
            else:
                prev_char = c
            if page:
                page += c
            # End of page
            if bracket_count == 0 and page:
                pages.append(page)
                page = ""
        return pages
    def __eq__(self, other):
        return type(self)==type(other) and self.title==other.title
class PageTag(BlockContentItem):
    """A tag reference: '#tag' or '#[[Page Title]]'."""
    def __init__(self, title, string=None):
        """
        Args:
            title (str or BlockContent)
        """
        if type(title)==str: title = PageRef.find_and_replace(title)
        self._title = title   # BlockContent; may contain nested PageRefs
        self.string = string
    @classmethod
    def from_string(cls, string, validate=True, **kwargs):
        super().from_string(string, validate)
        # Drop the leading '#' and any surrounding '[[ ]]'.
        title = re.sub("\[\[([\W\w]*)\]\]", "\g<1>", string[1:])
        roam_objects = PageRef.find_and_replace(title)
        return cls(roam_objects, string)
    @property
    def title(self):
        return self._title.to_string()
    def get_tags(self):
        # This tag's title plus any tags nested inside it.
        tags_in_title = [o.get_tags() for o in self._title]
        tags_in_title = list(set(reduce(lambda x,y: x+y, tags_in_title)))
        return [self.title] + tags_in_title
    def get_contents(self):
        items = []
        for item in self._title:
            items += item.get_contents()
        return items
    def to_string(self):
        if self.string:
            return self.string
        return "#"+self.title
    def to_html(self, *arg, **kwargs):
        return \
            f'<span data-tag="{html.escape(self.title)}" '\
            f'class="rm-page-ref rm-page-ref-tag">#{html.escape(self.title)}</span>'
    @classmethod
    def create_pattern(cls, string):
        pats = ["#[\w\-_@\.]+"]
        # Create pattern for page refs which look like tags
        page_ref_pat = PageRef.create_pattern(string)
        if page_ref_pat:
            pats += ["#"+pat for pat in re.split(RE_SPLIT_OR, page_ref_pat)]
        return "|".join(pats)
    def __eq__(self, other):
        return type(self)==type(other) and self.title == other.title
class BlockRef(BlockContentItem):
    """Reference to another block by its 9-character uid: '((abcdefghi))'."""
    def __init__(self, uid, roam_db=None, string=None):
        self.uid = uid
        self.roam_db = roam_db
        self.string = string
    @classmethod
    def from_string(cls, string, *args, **kwargs):
        super().from_string(string)
        return cls(string[2:-2], roam_db=kwargs.get("roam_db", None), string=string)
    def to_string(self, expand=False):
        """Return the reference markup, or the referenced block's text when
        `expand` is True and the block can be resolved."""
        if expand:
            referenced = self.get_referenced_block()
            if referenced:
                return referenced.to_string()
        if self.string:
            return self.string
        return f"(({self.uid}))"
    def to_html(self, *arg, **kwargs):
        referenced = self.get_referenced_block()
        if referenced:
            inner = referenced.to_html()
        else:
            inner = html.escape(self.to_string())
        return '<div class="rm-block-ref"><span>%s</span></div>' % inner
    def get_tags(self):
        return []
    @classmethod
    def create_pattern(cls, string=None):
        return "\(\([\w\d\-_]{9}\)\)"
    def get_referenced_block(self):
        # Returns None when no database is attached (or the uid is unknown).
        if self.roam_db:
            return self.roam_db.query_by_uid(self.uid)
    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self.uid == other.uid
class Url(BlockContentItem):
    """A bare URL, rendered as a clickable link."""
    def __init__(self, text):
        self.text = text
    @classmethod
    def from_string(cls, string, **kwargs):
        super().from_string(string)
        return cls(string)
    def to_string(self):
        return self.text
    def to_html(self, *arg, **kwargs):
        escaped = html.escape(self.text)
        return '<span><a href="%s">%s</a></span>' % (escaped, escaped)
    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self.text == other.text
class String(BlockContentItem):
    """Plain text span; the atomic fallback item in a BlockContent."""
    def __init__(self, string):
        # Unwrap a String passed in place of a str.
        # Bug fix: this previously used `==` (a no-op comparison) instead of
        # `=`, so a String instance would be stored as-is and to_string()
        # would return a String object rather than a str.
        if type(string) == String:
            string = string.to_string()
        self.string = string
    @classmethod
    def from_string(cls, string, validate=True, **kwargs):
        super().from_string(string, validate)
        return cls(string)
    @classmethod
    def validate_string(cls, string):
        # Any text is a valid String.
        return True
    def to_html(self, *arg, **kwargs):
        # Escape HTML and preserve line breaks.
        return html.escape(self.to_string()).replace("\n", "<br>")
    def get_tags(self):
        return []
    def to_string(self):
        return self.string
    def __eq__(self, other):
        return type(self)==type(other) and self.string==other.string
class Attribute(BlockContentItem):
    """An attribute definition at the start of a block: 'title::'."""
    def __init__(self, title, string=None):
        self.title = title
        self.string = string
    @classmethod
    def from_string(cls, string, validate=True, **kwargs):
        super().from_string(string, validate)
        # Drop the trailing '::'.
        return cls(string[:-2], string)
    @classmethod
    def validate_string(cls, string):
        # Anchor the end; the pattern itself anchors the start.
        pat = re.compile(cls.create_pattern(string)+"$")
        return re.match(pat, string) is not None
    @classmethod
    def create_pattern(cls, string=None):
        return "^(?:(?<!:)[^:])+::"
    def to_html(self, *arg, **kwargs):
        return '<span><strong>%s:</strong></span>' % html.escape(self.title)
    def get_tags(self):
        return [self.title]
    def to_string(self):
        if self.string:
            return self.string
        return self.title+"::"
    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self.title == other.title
| 33.100666 | 138 | 0.560284 | import os
import re
import logging
from functools import reduce
from itertools import zip_longest
from collections.abc import Iterable
import html
logger = logging.getLogger(__name__)
RE_SPLIT_OR = "(?<!\\\)\|"
class BlockContent(list):
def __init__(self, roam_objects=[]):
if type(roam_objects) not in [list, BlockContent]:
roam_objects = [roam_objects]
for obj in roam_objects:
if type(obj) in [str, int, float]:
obj = String(str(obj))
elif isinstance(obj, BlockContentItem):
pass
else:
raise ValueError(f"roam_objects can't contain {type(obj)} type objects")
self.append(obj)
@classmethod
def find_and_replace(cls, obj, skip=[], *args, **kwargs):
roam_object_types = [
BlockQuote,
CodeBlock,
CodeInline,
Cloze,
Image,
Alias,
Checkbox,
Embed,
View,
Button,
PageTag,
PageRef,
BlockRef,
Attribute,
#Url, #TODO: don't have a good regex for this right now
]
roam_object_types = [o for o in roam_object_types if o not in skip]
roam_objects = BlockContent(obj)
for rm_obj_type in roam_object_types:
roam_objects = rm_obj_type.find_and_replace(roam_objects, *args, **kwargs)
return cls(roam_objects)
@classmethod
def from_string(cls, string, *args, **kwargs):
return cls.find_and_replace(string, *args, **kwargs)
def get_tags(self):
tags = []
for obj in self:
tags += obj.get_tags()
return list(set(tags))
def to_string(self):
return "".join([o.to_string() for o in self])
def to_html(self, *args, **kwargs):
res = "".join([o.to_html(*args, **kwargs) for o in self])
res = self._all_emphasis_to_html(res)
return res
def is_single_pageref(self):
return len(self)==1 and type(self[0])==PageRef
def get_strings(self):
return [o for o in self if type(o)==String]
@staticmethod
def _get_emphasis_locs(string, emphasis):
emphasis_locs = []
emphasis_start = emphasis_end = None
for i,c in enumerate(string):
if emphasis_start is None and string[i:i+len(emphasis)] == emphasis:
emphasis_start = i
continue
if emphasis_end is None and string[i:i+len(emphasis)] == emphasis:
emphasis_end = i + (len(emphasis)-1)
emphasis_locs.append((emphasis_start, emphasis_end))
emphasis_start = emphasis_end = None
return emphasis_locs
def _emphasis_to_html(self, string, emphasis, html_left, html_right):
emphasis_locs = self._get_emphasis_locs(string, emphasis)
diff = 0
for (i, j) in emphasis_locs:
i, j = i + diff, j + diff
string = string[:i] + html_left + string[i+len(emphasis):j-len(emphasis)+1] + html_right + string[j+1:]
diff += len(html_left+html_right) - len(emphasis+emphasis)
return string
def _all_emphasis_to_html(self, string):
string = self._emphasis_to_html(string, emphasis="`", html_left="<code>", html_right="</code>")
string = self._emphasis_to_html(string, emphasis="**", html_left="<b>", html_right="</b>")
string = self._emphasis_to_html(string, emphasis="__", html_left="<em>", html_right="</em>")
string = self._emphasis_to_html(string, emphasis="^^", html_left='<span class="roam-highlight">', html_right='</span>')
return string
def __repr__(self):
return "<%s(%s)>" % (
self.__class__.__name__, repr(list(self)))
def get_contents(self, recursive=False):
if not recursive:
return list(self)
else:
items = []
for item in self:
items += [item]
items += item.get_contents()
return items
def merge_adjacent_strings(self):
i = 0
while i + 1 < len(self):
if type(self[i]) == String and type(self[i+1]) == String:
self[i].string += self[i+1].string
del self[i+1]
else:
i += 1
class BlockContentItem:
@classmethod
def from_string(cls, string, validate=True):
if validate and not cls.validate_string(string):
raise ValueError(f"Invalid string '{string}' for {cls.__name__}")
@classmethod
def validate_string(cls, string):
pat = cls.create_pattern(string)
pat = "|".join([f"^{p}$" for p in re.split(RE_SPLIT_OR, pat)])
if re.match(re.compile(pat), string):
return True
return False
def to_string(self):
raise NotImplementedError
def to_html(self, *args, **kwargs):
return self.string
def get_tags(self):
return []
def get_contents(self):
return []
@classmethod
def _find_and_replace(cls, string, *args, **kwargs):
pat = cls.create_pattern(string)
if not pat:
return [String(string)]
roam_objects = [cls.from_string(s, validate=False, *args, **kwargs) for s in re.findall(pat, string)]
string_split = [String(s) for s in re.split(pat, string)]
roam_objects = [a for b in zip_longest(string_split, roam_objects) for a in b if a]
roam_objects = [o for o in roam_objects if o.to_string()]
return roam_objects
@classmethod
def find_and_replace(cls, string, *args, **kwargs):
if type(string)==str:
roam_objects = BlockContent([String(string)])
elif type(string)==BlockContent:
roam_objects = string
else:
raise ValueError(f"'{type(string)}' is an invalid type for `string`")
new_roam_objects = []
for obj in roam_objects:
if type(obj)==String:
new_roam_objects += cls._find_and_replace(obj.to_string(), *args, **kwargs)
else:
new_roam_objects += [obj]
roam_objects = new_roam_objects
return BlockContent(roam_objects)
def __repr__(self):
return "<%s(string='%s')>" % (
self.__class__.__name__, self.to_string())
def __eq__(self, b):
return self.to_string()==b.to_string()
class BlockQuote(BlockContentItem):
def __init__(self, block_content, prefix="> "):
self.block_content = block_content
self.prefix = prefix
@classmethod
def from_string(cls, string, validate=True, **kwargs):
super().from_string(string, validate)
prefix, quote = re.match("^(>\s?)([\w\W]*)$", string).groups()
block_content = BlockContent.from_string(quote, **kwargs)
return cls(block_content, prefix=prefix)
def to_string(self):
return self.prefix + self.block_content.to_string()
def to_html(self, *args, **kwargs):
return '<blockquote class="rm-bq">' + self.block_content.to_html(*args, **kwargs) + '</blockquote>'
def get_tags(self):
return self.block_content.get_tags()
def get_contents(self):
return self.block_content.get_contents()
@classmethod
def create_pattern(cls, string=None):
return "^>[\w\W]*$"
def __eq__(self, other):
return type(self)==type(other) and self.block_content.to_string()==other.block_content.to_string()
class ClozeLeftBracket(BlockContentItem):
    """The opening token of a cloze deletion, e.g. '{', '{c1:', or '[[{c1:]]'."""
    def __init__(self, id=None, enclosed=False, c=False, sep=""):
        # id: explicit cloze number, if written (e.g. the 1 in '{c1:').
        # enclosed: True when the bracket was wrapped as [[...]].
        # c: True when the literal 'c' marker was present.
        # sep: separator after the number -- ':', '|', or ''.
        self.id = id
        self.enclosed = enclosed
        self.c = c
        self.sep = sep
    @classmethod
    def _find_and_replace(cls, string):
        """Split `string` around every opening cloze bracket.

        Returns a BlockContent alternating String segments and
        ClozeLeftBracket objects, or [String(string)] when no opening
        bracket is present. The lookarounds in the patterns keep '{{'
        (Roam component syntax) from being treated as a cloze.
        """
        pats = [
            "\[\[{c?\d*[:|]?\]\]",
            "(?<!{){c?\d+[:|]",
            "(?<!{){(?!{)"
        ]
        matches = list(re.finditer("|".join(pats), string))
        if not matches:
            return [String(string)]
        objs = []
        last_cloze_end = 0
        for match in matches:
            text = match.group(0)
            c = "c" in text
            enclosed = text.startswith("[[")
            m = re.search("\d+", text)
            id = int(m.group(0)) if m else None
            if ":" in text:
                sep = ":"
            elif "|" in text:
                sep = "|"
            else:
                sep = ""
            # Text before this bracket, then the bracket token itself.
            objs.append(String(string[last_cloze_end:match.start()]))
            objs.append(cls(id, enclosed, c, sep))
            last_cloze_end = match.end()
        if last_cloze_end != len(string):
            objs.append(String(string[last_cloze_end:]))
        return BlockContent(objs)
    def to_string(self):
        """Serialize back to Roam markup."""
        res = "{"
        if self.c:
            res += "c"
        if self.id:
            res += str(self.id)
        if self.sep:
            res += self.sep
        if self.enclosed:
            res = "[[" + res + "]]"
        return res
    def to_html(self):
        """Render as the Anki-style opening token '{{cN::'."""
        return "{{c" + str(self.id) + "::"
    def __repr__(self):
        return "<%s(string='%s')>" % (
            self.__class__.__name__, self.to_string())
class ClozeRightBracket(BlockContentItem):
    """The closing token of a cloze deletion, e.g. '}', '::hint}', or '[[::hint}]]'."""

    def __init__(self, enclosed=False, hint=None, string=None):
        self.enclosed = enclosed  # True when the bracket was wrapped as [[...]]
        self.hint = hint          # optional ClozeHint attached to this bracket
        self.string = string

    @classmethod
    def _find_and_replace(cls, string):
        """Split `string` around every closing cloze bracket.

        Returns a BlockContent alternating String segments and
        ClozeRightBracket objects, or [String(string)] when no closing
        bracket is present.
        """
        pats = [
            "\[\[(?:::[^}\[]*)?}\]\]",
            "\[\[(?:::[^}\[]*)\]\]}",
            "(?:::[^}\[]*)}(?!})",
            "(?<!})}(?!})",
        ]
        # Materialize the iterator: a bare re.finditer() result is always
        # truthy, which made the original `if not matches` guard dead code.
        matches = list(re.finditer("|".join(pats), string))
        if not matches:
            return [String(string)]
        objs = []
        last_cloze_end = 0
        for match in matches:
            text = match.group(0)
            if text.startswith("[[") and text.endswith("]]"):
                # '[[::hint}]]' / '[[}]]': bracket (and hint) enclosed together.
                hint = ClozeHint(re.sub("[\[\]}]", "", text)[2:]) if "::" in text else None
                enclosed = True
            elif text.startswith("[[") and text.endswith("}"):
                # '[[::hint]]}': only the hint is enclosed, not the bracket.
                hint = ClozeHint(re.sub("[\[\]}]", "", text)[2:], enclosed=True)
                enclosed = False
            else:
                # '::hint}' or a bare '}'.
                hint = ClozeHint(re.sub("[\[\]}]", "", text)[2:]) if "::" in text else None
                enclosed = False
            objs.append(String(string[last_cloze_end:match.start()]))
            objs.append(cls(enclosed, hint=hint))
            last_cloze_end = match.end()
        if last_cloze_end != len(string):
            objs.append(String(string[last_cloze_end:]))
        return BlockContent(objs)

    def to_string(self):
        """Serialize back to Roam markup."""
        res = "}"
        if self.hint:
            res = self.hint.to_string() + res
        if self.enclosed:
            res = "[[" + res + "]]"
        return res

    def to_html(self):
        """Render as the Anki-style closing token '}}' (with optional hint)."""
        if self.hint:
            return self.hint.to_html() + "}}"
        return "}}"

    def __repr__(self):
        return "<%s(string='%s')>" % (
            self.__class__.__name__, self.to_string())
class ClozeHint(BlockContentItem):
    """A cloze hint: '::hint text', optionally enclosed as '[[::hint text]]'."""

    def __init__(self, text, enclosed=False):
        self.text = text          # the hint text, without the leading '::'
        self.enclosed = enclosed  # True when wrapped in [[...]]

    @classmethod
    def from_string(cls, hint):
        """Parse '::hint' markup (the leading '::' is stripped)."""
        return cls(hint[2:])

    @classmethod
    def _find_and_replace(cls, string):
        """Split `string` around every cloze hint.

        Returns a BlockContent alternating String segments and ClozeHint
        objects; returns the input unchanged when no hint is present.
        """
        pats = [
            "\[\[::[^\]]*\]\]",
            "::[^}\[]*"
        ]
        # Materialize the iterator: a bare re.finditer() result is always
        # truthy, so the original `if not matches` guard could never fire.
        matches = list(re.finditer("|".join(pats), string))
        if not matches:
            return BlockContent(string)
        objs = []
        last_cloze_end = 0
        for match in matches:
            text = match.group(0)
            if text.startswith("[["):
                enclosed = True
                text = text[2:-2]
            else:
                enclosed = False
                text = text[2:]
            objs.append(String(string[last_cloze_end:match.start()]))
            objs.append(cls(text, enclosed))
            last_cloze_end = match.end()
        if last_cloze_end != len(string):
            objs.append(String(string[last_cloze_end:]))
        return BlockContent(objs)

    def to_string(self):
        """Serialize back to Roam markup."""
        res = "::" + str(self.text)
        if self.enclosed:
            res = "[[" + res + "]]"
        return res

    def to_html(self):
        """Render as the Anki-style hint separator plus text."""
        return "::" + str(self.text)
class Cloze(BlockContentItem):
    """A complete cloze deletion: left bracket, inner content, optional
    hint, and right bracket (e.g. '{c1:answer::hint}').

    Serializes to Anki style ('{{c1::answer::hint}}') or back to the
    original Roam style.
    """
    def __init__(self, inner:BlockContent="", left_bracket:ClozeLeftBracket=None, right_bracket:ClozeRightBracket=None,
                 hint:ClozeHint=None, id=1, c=True, sep=":", enclosed=False, string=None, roam_db=None):
        """Assemble a cloze from parsed parts, or from keyword defaults.

        Raises:
            ValueError: when a hint is supplied both directly and on the
                right bracket.
        """
        self.inner = BlockContent(inner)
        self.left_bracket = left_bracket or ClozeLeftBracket(id=id, c=c, enclosed=enclosed, sep=sep)
        self.right_bracket = right_bracket or ClozeRightBracket(enclosed=enclosed)
        if self.right_bracket.hint and hint:
            raise ValueError("Only allowed one hint")
        if type(hint) == str:
            hint = ClozeHint(hint)
        self._hint = hint
        self.string = string
        self.roam_db = roam_db
    @property
    def hint(self):
        # The hint may live on the cloze itself or on its right bracket.
        return self._hint or self.right_bracket.hint
    @property
    def id(self):
        # Cloze number, stored on the left bracket (e.g. the 1 in '{c1:').
        return self.left_bracket.id if self.left_bracket else None
    @id.setter
    def id(self, id):
        self.left_bracket.id = id
    @classmethod
    def from_string(cls, string, validate=True, **kwargs):
        """Parse a string that must consist of exactly one cloze.

        Raises:
            ValueError: when `string` is not a single cloze.
        """
        objs = cls.find_and_replace(string)
        if len(objs) != 1 or type(objs[0]) != cls:
            raise ValueError(f"Invalid string '{string}' for {cls.__name__}")
        return objs[0]
    @classmethod
    def find_and_replace(cls, string, *args, **kwargs):
        """Replace every cloze in `string` with a Cloze object.

        Tokenizes left brackets, right brackets, and hints, then pairs
        them up left-to-right; unmatched bracket tokens are demoted back
        to plain String segments. Clozes without an explicit id are then
        numbered with the lowest ids not already taken.
        """
        objs = BlockContent(string)
        objs = ClozeLeftBracket.find_and_replace(objs)
        objs = ClozeRightBracket.find_and_replace(objs)
        objs = ClozeHint.find_and_replace(objs)
        res = []
        next_idx = 0
        left_idx = right_idx = None
        for i, obj in enumerate(objs):
            if right_idx is None and type(obj) == ClozeLeftBracket:
                res += objs[next_idx:i]
                next_idx = left_idx = i
            elif left_idx is not None and type(obj) == ClozeRightBracket:
                # Complete pair: everything between the brackets is the answer.
                inner = objs[left_idx+1:i]
                hint = None
                if type(inner[-1]) == ClozeHint:
                    inner, hint = inner[:-1], inner[-1]
                inner = BlockContent.find_and_replace(inner)
                cloze = cls(inner=inner, left_bracket=objs[left_idx], right_bracket=obj, hint=hint)
                res.append(cloze)
                left_idx = right_idx = None
                next_idx = i+1
            elif left_idx is not None and type(obj) == ClozeLeftBracket:
                # Second opener before a closer: abandon the previous opener.
                res += objs[left_idx:i]
                next_idx = left_idx = i
            elif right_idx is not None and type(obj) == ClozeRightBracket:
                res += objs[right_idx:i]
                next_idx = right_idx = i
        res += objs[next_idx:]
        # Any bracket/hint tokens left unpaired are plain text after all.
        for i, obj in enumerate(res):
            if type(obj) in [ClozeLeftBracket, ClozeRightBracket, ClozeHint]:
                res[i] = String(obj.to_string())
        cls._assign_cloze_ids([o for o in res if type(o)==Cloze])
        bc = BlockContent(res)
        bc.merge_adjacent_strings()
        return bc
    def get_tags(self):
        """Tags referenced inside the clozed content."""
        return self.inner.get_tags()
    def to_string(self, style="anki"):
        """Serialize in 'anki' ({{c1::...}}) or 'roam' (original) style.

        Raises:
            ValueError: for any other `style`.
        """
        if style=="anki":
            return "{{c%s::%s%s}}" % (self.id, self.inner.to_string(), self.hint.to_string() if self.hint else "")
        elif style=="roam":
            res = ""
            for o in [self.left_bracket, self.inner, self._hint, self.right_bracket]:
                res += o.to_string() if o else ""
            return res
        else:
            raise ValueError(f"style='{style}' is an invalid. "\
                "Must be 'anki' or 'roam'")
    def to_html(self, *args, **kwargs):
        """Render as HTML, optionally repositioning the cloze markup.

        kwargs:
            proc_cloze: when False, render the inner content only and
                leave the cloze markup as plain text.
            pageref_cloze: when the cloze wraps a single page reference,
                place the cloze 'outside' the rendered ref, 'inside' its
                title, or around the 'base_only' part of the title.
        """
        kwargs['roam_db'] = self.roam_db
        proc_cloze = kwargs.get("proc_cloze", True)
        pageref_cloze = kwargs.get("pageref_cloze", "outside")
        if not proc_cloze:
            bc = BlockContent.find_and_replace(self.to_string("roam"), skip=[Cloze])
            return bc.to_html(*args, **kwargs)
        # Fancy options to move around the cloze when it's only around a PageRef
        if self.inner.is_single_pageref() and self.hint is None:
            pageref = self.inner[0]
            if pageref_cloze=="outside":
                content = pageref.to_html()
                return Cloze(id=self.id, inner=content, hint=self.hint).to_string()
            elif pageref_cloze=="inside":
                clozed_title = Cloze(id=self.id, inner=pageref.title, hint=self.hint).to_string()
                return pageref.to_html(title=clozed_title)
            elif pageref_cloze=="base_only":
                clozed_base = Cloze(id=self.id, inner=pageref.get_basename(), hint=self.hint).to_string()
                namespace = pageref.get_namespace()
                if namespace:
                    clozed_base = namespace + "/" + clozed_base
                return pageref.to_html(title=clozed_base)
            else:
                raise ValueError(f"{pageref_cloze} is an invalid option for `pageref_cloze`")
        res = ""
        for o in [self.left_bracket, self.inner, self._hint, self.right_bracket]:
            res += o.to_html() if o else ""
        return res
    @staticmethod
    def _assign_cloze_ids(clozes):
        """Give every unnumbered cloze the smallest unused id (in place)."""
        assigned_ids = [c.id for c in clozes if c.id]
        next_id = 1
        for cloze in clozes:
            if cloze.id: continue
            while next_id in assigned_ids:
                next_id += 1
            assigned_ids += [next_id]
            cloze.id = next_id
    def __repr__(self):
        string = self.string or self.to_string(style="roam")
        return "<%s(id=%s, string='%s')>" % (
            self.__class__.__name__, self.id, string)
    def __eq__(self, other):
        return type(self)==type(other) and self.inner == other.inner
class Image(BlockContentItem):
    """An inline markdown image: ''."""

    def __init__(self, src, alt="", string=None):
        self.src = src        # image URL
        self.alt = alt        # alt text (may be empty)
        self.string = string  # original markup, if parsed from a string

    @classmethod
    def from_string(cls, string, validate=True, **kwargs):
        """Parse '' markup into an Image."""
        super().from_string(string, validate)
        alt, src = re.search("!\[([^\[\]]*)\]\(([^\)\n]+)\)", string).groups()
        return cls(src, alt)

    @classmethod
    def create_pattern(cls, string=None):
        """Regex matching a markdown image."""
        return r"!\[[^\[\]]*\]\([^\)\n]+\)"

    def to_string(self):
        """Serialize back to markdown, preferring the original text."""
        if self.string:
            return self.string
        # Reconstruct the markdown image syntax; previously this returned
        # an empty f-string, which destroyed the image on round-trip.
        return f""

    def to_html(self, *arg, **kwargs):
        """Render as an <img> element with escaped attributes."""
        return f'<img src="{html.escape(self.src)}" alt="{html.escape(self.alt)}" draggable="false" class="rm-inline-img">'

    def __eq__(self, other):
        """Equal when source and alt text both match."""
        return type(self)==type(other) and self.src==other.src and self.alt==other.alt
class Alias(BlockContentItem):
    """A Roam alias '[text](destination)' where the destination is a page
    reference, a block reference, or an external URL."""

    def __init__(self, alias, destination, string=None):
        self.alias = alias              # the visible link text
        self.destination = destination  # PageRef, BlockRef, or String
        self.string = string

    @classmethod
    def from_string(cls, string, validate=True, **kwargs):
        """Parse alias markup, typing the destination by its syntax."""
        super().from_string(string, validate)
        alias, destination = re.search(r"^\[([^\[\]]+)\]\(([\W\w]+)\)$", string).groups()
        if re.match("^\[\[.*\]\]$", destination):
            destination = PageRef.from_string(destination)
        elif re.match("^\(\(.*\)\)$", destination):
            roam_db = kwargs.get("roam_db", None)
            destination = BlockRef.from_string(destination, roam_db=roam_db)
        else:
            destination = String(destination)
        return cls(alias, destination, string)

    def to_string(self):
        """Serialize back to Roam markup, preferring the original text."""
        if self.string:
            return self.string
        return f"[{self.alias}]({self.destination.to_string()})"

    def to_html(self, *arg, **kwargs):
        """Render as an <a> element styled per destination type."""
        if type(self.destination)==PageRef:
            return '<a title="page: %s" class="rm-alias rm-alias-page">%s</a>' % (
                html.escape(self.destination.title), html.escape(self.alias))
        elif type(self.destination)==BlockRef:
            return '<a title="block: %s" class="rm-alias rm-alias-block">%s</a>' % (
                html.escape(self.destination.to_string(expand=True)), html.escape(self.alias))
        else:
            return '<a title="url: {0}" class="rm-alias rm-alias-external" href="{0}">{1}</a>'.format(
                html.escape(self.destination.to_string()), html.escape(self.alias))

    def get_tags(self):
        """Tags referenced by the destination."""
        return self.destination.get_tags()

    def get_contents(self):
        """Content items inside the destination."""
        return self.destination.get_contents()

    @classmethod
    def create_pattern(cls, string=None):
        """Regex matching aliases to page refs, block refs, or plain URLs."""
        re_template = r"\[[^\[\]]+\]\(%s\)"
        destination_pats = []
        for o in [PageRef, BlockRef]:
            dest_pat = o.create_pattern(string)
            destination_pats += re.split(RE_SPLIT_OR, dest_pat) if dest_pat else []
        destination_pats.append("[^\(\)\[\]]+")
        return "|".join([re_template % pat for pat in destination_pats])

    def __eq__(self, other):
        """Equal when type, alias text, and destination all match.

        Previously compared `other.destination == other.destination` (a
        tautology), so any two Aliases with the same text compared equal
        regardless of destination.
        """
        return type(self)==type(other) and self.alias==other.alias and self.destination==other.destination
class CodeBlock(BlockContentItem):
    """A fenced code block: ```lang\\ncode``` or ```code```."""

    def __init__(self, code, language=None, string=None):
        """Store the code text, optional language tag, and raw markup."""
        self.code = code
        self.language = language
        self.string = string

    @classmethod
    def from_string(cls, string, **kwargs):
        """Parse a fenced code block, detecting a supported language tag."""
        super().from_string(string)
        supported_languages = [
            "clojure", "css", "elixir", "html", "plain text", "python", "ruby",
            "swift", "typescript", "isx", "yaml", "rust", "shell", "php", "java",
            "c#", "c++", "objective-c", "kotlin", "sql", "haskell", "scala",
            "common lisp", "julia", "sparql", "turtle", "javascript"]
        pat_lang = "^```(%s)\n" % "|".join([re.escape(l) for l in supported_languages])
        match_lang = re.search(pat_lang, string)
        if match_lang is None:
            # No recognized language tag: everything between fences is code.
            language = None
            pat = re.compile("```([^`]*)```")
        else:
            language = match_lang.group(1)
            pat = re.compile(f"```{language}\n([^`]*)```")
        code = re.search(pat, string).group(1)
        return cls(code, language, string)

    @classmethod
    def create_pattern(cls, string=None):
        """Regex matching a fenced code block."""
        return "```[^`]*```"

    def to_string(self):
        """Serialize back to fenced-code markup, preferring the original."""
        if self.string:
            return self.string
        if self.language:
            return f'```{self.language}\n{self.code}```'
        return f'```{self.code}```'

    def to_html(self, *args, **kwargs):
        """Render as <pre><code> with escaped content."""
        return f'<pre><code>{html.escape(self.code)}</code></pre>'

    def __eq__(self, other):
        """Equal when language and code text both match."""
        if type(self) != type(other):
            return False
        return self.language == other.language and self.code == other.code
class CodeInline(BlockContentItem):
    """Inline code wrapped in single backticks."""

    def __init__(self, code, string=None):
        """Store the code text and, if parsed, the original markup."""
        self.code = code
        self.string = string

    @classmethod
    def from_string(cls, string, **kwargs):
        """Parse backtick-delimited markup into a CodeInline."""
        super().from_string(string)
        extracted = re.search(re.compile("`([^`]*)`"), string).group(1)
        return cls(extracted, string)

    @classmethod
    def create_pattern(cls, string=None):
        """Regex matching an inline code span."""
        return "`[^`]*`"

    def to_string(self):
        """Serialize back to Roam markup, preferring the original text."""
        return self.string if self.string else f'`{self.code}`'

    def to_html(self, *args, **kwargs):
        """Render as an HTML <code> element with escaped content."""
        return f'<code>{html.escape(self.code)}</code>'

    def __eq__(self, other):
        """Equal when both are CodeInline with identical code text."""
        if type(self) != type(other):
            return False
        return self.code == other.code
class Checkbox(BlockContentItem):
    """A Roam TODO/DONE checkbox."""

    def __init__(self, checked=False):
        """`checked` is True for {{[[DONE]]}}, False for {{[[TODO]]}}."""
        self.checked = checked

    @classmethod
    def from_string(cls, string, validate=True, **kwargs):
        """Parse a checkbox; anything containing 'DONE' counts as checked."""
        super().from_string(string, validate)
        is_done = "DONE" in string
        return cls(checked=is_done)

    @classmethod
    def create_pattern(cls, string=None):
        """Regex matching either checkbox literal."""
        return "|".join([re.escape("{{[[TODO]]}}"), re.escape("{{[[DONE]]}}")])

    def get_tags(self):
        """The implicit tag created by the checkbox state."""
        if self.checked:
            return ["DONE"]
        return ["TODO"]

    def to_string(self):
        """Serialize back to Roam markup."""
        if self.checked:
            return "{{[[DONE]]}}"
        return "{{[[TODO]]}}"

    def to_html(self, *arg, **kwargs):
        """Render as an HTML checkbox, checked or not."""
        checked_html = '<span><label class="check-container"><input type="checkbox" checked=""><span class="checkmark"></span></label></span>'
        unchecked_html = '<span><label class="check-container"><input type="checkbox"><span class="checkmark"></span></label></span>'
        return checked_html if self.checked else unchecked_html

    def __eq__(self, other):
        """Equal when both are Checkboxes in the same state."""
        return type(self) == type(other) and self.checked == other.checked
class View(BlockContentItem):
    """A Roam view component such as {{youtube: ...}} or {{[[query]]: ...}}."""

    def __init__(self, name: BlockContentItem, text, string=None):
        """`name` is the view type (str or parsed item); `text` its argument."""
        self.name = String(name) if type(name) == str else name
        self.text = text
        self.string = string

    @classmethod
    def from_string(cls, string, validate=True, **kwargs):
        """Parse '{{name:text}}' markup, promoting '[[name]]' to a PageRef."""
        super().from_string(string, validate)
        raw_name, text = re.search("{{([^:]*):(.*)}}", string).groups()
        if re.match("^\[\[.*\]\]$", raw_name):
            parsed_name = PageRef.from_string(raw_name)
        else:
            parsed_name = String(raw_name)
        return cls(parsed_name, text, string)

    def to_html(self, *arg, **kwargs):
        """Views render only as their escaped argument text."""
        return html.escape(self.text)

    def get_tags(self):
        """Tags referenced by the view's name."""
        return self.name.get_tags()

    def get_contents(self):
        """Content items inside the view's name."""
        return self.name.get_contents()

    @classmethod
    def create_pattern(cls, strings=None):
        """Regex matching supported views, bare or page-ref style."""
        re_template = "{{%s:.*}}"
        pats = []
        for view in ["youtube", "query", "mentions"]:
            pats += [re_template % view, re_template % re.escape(f"[[{view}]]")]
        return "|".join(pats)

    def to_string(self):
        """Serialize back to Roam markup, preferring the original text."""
        if self.string:
            return self.string
        return "{{%s:%s}}" % (self.name.to_string(), self.text)

    def __eq__(self, other):
        """Equal when type, name and argument text all match."""
        return type(self) == type(other) and self.name == other.name and self.text == other.text
class Embed(BlockContentItem):
    """A Roam block embed: {{embed: ((uid))}} or {{[[embed]]: ((uid))}}."""

    def __init__(self, name: BlockContentItem, blockref, string=None):
        """`name` is the embed keyword; `blockref` the embedded BlockRef."""
        self.name = String(name) if type(name) == str else name
        self.blockref = blockref
        self.string = string

    @classmethod
    def from_string(cls, string, validate=True, **kwargs):
        """Parse embed markup, promoting '[[embed]]' to a PageRef."""
        super().from_string(string, validate)
        raw_name, raw_ref = re.search("{{([^:]*):\s*([^\s]*)\s*}}", string).groups()
        if re.match("^\[\[.*\]\]$", raw_name):
            parsed_name = PageRef.from_string(raw_name)
        else:
            parsed_name = String(raw_name)
        parsed_ref = BlockRef.from_string(raw_ref, **kwargs)
        return cls(parsed_name, parsed_ref, string)

    def to_html(self, *arg, **kwargs):
        """Render the referenced block (with children) in an embed container."""
        block = self.blockref.get_referenced_block()
        if block:
            inner_html = block.to_html(children=True, *arg, **kwargs)
        else:
            # Reference not resolvable: fall back to the raw block ref.
            inner_html = self.blockref.to_html(*arg, **kwargs)
        return f'<div class="rm-embed-container">{inner_html}</div>'

    def get_tags(self):
        """Tags referenced by the embed's name."""
        return self.name.get_tags()

    def get_contents(self):
        """Content items inside the embed's name."""
        return self.name.get_contents()

    @classmethod
    def create_pattern(cls, strings=None):
        """Regex matching embed markup, bare or page-ref style."""
        blockref_pat = BlockRef.create_pattern()
        pats = [
            "{{embed:\s*%s\s*}}" % blockref_pat,
            "{{\[\[embed\]\]:\s*%s\s*}}" % blockref_pat,
        ]
        return "|".join(pats)

    def to_string(self):
        """Serialize back to Roam markup, preferring the original text."""
        if self.string:
            return self.string
        return "{{%s:%s}}" % (self.name.to_string(), self.blockref.to_string())

    def __eq__(self, other):
        """Equal when type, name and block reference all match."""
        return type(self) == type(other) and self.name == other.name and self.blockref == other.blockref
class Button(BlockContentItem):
    """A Roam button: {{name}} or {{name:text}}."""

    def __init__(self, name, text="", string=None):
        """`name` is the label; `text` the optional payload after ':'."""
        self.name = name
        self.text = text
        self.string = string

    @classmethod
    def from_string(cls, string, validate=True, **kwargs):
        """Parse '{{...}}' button markup, splitting on the first colon."""
        super().from_string(string, validate)
        contents = string[2:-2]
        if ":" in contents:
            label, payload = re.search(r"([^:]*):(.*)", contents).groups()
        else:
            label, payload = contents, ""
        return cls(label, payload, string)

    def get_tags(self):
        """Tags referenced inside the button's payload text."""
        return BlockContent.from_string(self.text).get_tags()

    def get_contents(self):
        """Content items inside the button's payload text."""
        return BlockContent.from_string(self.text).get_contents()

    def to_string(self):
        """Serialize back to Roam markup, preferring the original text."""
        if self.string:
            return self.string
        if self.text:
            return "{{%s:%s}}" % (self.name, self.text)
        return "{{%s}}" % self.name

    def to_html(self, *arg, **kwargs):
        """Render as a small HTML button showing only the label."""
        return '<button class="bp3-button bp3-small dont-focus-block">%s</button>' % html.escape(self.name)

    @classmethod
    def create_pattern(cls, string=None):
        """Regex matching '{{...}}' with no nested '{{' inside."""
        return "{{.(?:(?<!{{).)*}}"

    def __eq__(self, other):
        """Equal when type, label and payload all match."""
        return type(self) == type(other) and self.name == other.name and self.text == other.text
class PageRef(BlockContentItem):
    """A page reference '[[Title]]'; the title may itself contain nested
    page references."""
    def __init__(self, title, uid="", string=None):
        # title may be a raw str (parsed for nested refs) or BlockContent.
        if type(title)==str: title = PageRef.find_and_replace(title)
        self._title = title
        self.uid = uid
        self.string = string
    @property
    def title(self):
        # Page title with any nested refs serialized back to text.
        return self._title.to_string()
    @classmethod
    def from_string(cls, string, validate=True, **kwargs):
        """Parse '[[Title]]' markup (outer brackets stripped)."""
        super().from_string(string, validate)
        roam_objects = PageRef.find_and_replace(string[2:-2])
        return cls(roam_objects, string=string)
    @classmethod
    def create_pattern(cls, string, groups=False):
        """Regex matching exactly the page refs found in `string`.

        With groups=True each alternative captures brackets and title
        separately. Returns None when `string` holds no page refs.
        """
        page_refs = PageRef.extract_page_ref_strings(string)
        if not page_refs:
            return None
        if groups:
            titles = [re.escape(p[2:-2]) for p in page_refs]
            return "|".join([f"(\[\[)({t})(\]\])" for t in titles])
        else:
            return "|".join([re.escape(p) for p in page_refs])
    def get_tags(self):
        """This page's title plus tags from any nested refs."""
        tags_in_title = [o.get_tags() for o in self._title]
        tags_in_title = list(set(reduce(lambda x,y: x+y, tags_in_title)))
        return [self.title] + tags_in_title
    def get_contents(self):
        """Flat list of content items making up the title."""
        items = []
        for item in self._title:
            items += item.get_contents()
        return items
    def get_namespace(self):
        """Everything before the final '/' in the title ('' if none)."""
        return os.path.split(self.title)[0]
    def get_basename(self):
        """The part of the title after the final '/'."""
        return os.path.split(self.title)[1]
    def to_string(self):
        """Serialize back to '[[Title]]', preferring the original text."""
        if self.string: return self.string
        return f"[[{self.title}]]"
    def to_html(self, title=None, *args, **kwargs):
        """Render as page-ref HTML.

        When the title is plain text and namespaced ('a/b'), namespace
        and name are wrapped in separate spans. A `title` argument
        overrides the rendered title (used by Cloze repositioning).
        """
        if title:
            title_html = title
        elif set([type(o) for o in self._title]) == set([String]):
            title = html.escape(self._title.to_string())
            title_split = title.split("/")
            if len(title_split) == 1:
                title_html = title
            else:
                namespace, name = "/".join(title_split[:-1]) + "/", title_split[-1]
                title_html = \
                    f'<span class="rm-page-ref-namespace">{namespace}</span>'\
                    f'<span class="rm-page-ref-name">{name}</span>'
        else:
            # Title contains nested objects: render each piece itself.
            title_html = "".join([o.to_html() for o in self._title])
        uid_attr = f' data-link-uid="{self.uid}"' if self.uid else ''
        return \
            f'<span data-link-title="{html.escape(self.title)}"{uid_attr}>'\
            f'<span class="rm-page-ref-brackets">[[</span>'\
            f'<span class="rm-page-ref rm-page-ref-link-color">{title_html}</span>'\
            f'<span class="rm-page-ref-brackets">]]</span>'\
            f'</span>'
    @staticmethod
    def extract_page_ref_strings(string):
        """Return every top-level '[[...]]' substring, nesting included.

        Scans character by character, tracking bracket depth, so that
        '[[a [[b]]]]' comes back as a single reference.
        """
        bracket_count = 0
        pages = []
        page = ""
        prev_char = ""
        for j,c in enumerate(string):
            if prev_char+c == "[[":
                if not page:
                    page = string[j-1]
                bracket_count += 1
                prev_char = ""
            elif prev_char+c == "]]":
                bracket_count -= 1
                prev_char = ""
            else:
                prev_char = c
            if page:
                page += c
            if bracket_count == 0 and page:
                pages.append(page)
                page = ""
        return pages
    def __eq__(self, other):
        return type(self)==type(other) and self.title==other.title
class PageTag(BlockContentItem):
    """A tag reference: '#tag' or '#[[tag with spaces]]'."""
    def __init__(self, title, string=None):
        # title may be a raw str (parsed for nested refs) or BlockContent.
        if type(title)==str: title = PageRef.find_and_replace(title)
        self._title = title
        self.string = string
    @classmethod
    def from_string(cls, string, validate=True, **kwargs):
        """Parse '#tag' markup; surrounding [[...]] brackets are removed."""
        super().from_string(string, validate)
        title = re.sub("\[\[([\W\w]*)\]\]", "\g<1>", string[1:])
        roam_objects = PageRef.find_and_replace(title)
        return cls(roam_objects, string)
    @property
    def title(self):
        # Tag name with any nested refs serialized back to text.
        return self._title.to_string()
    def get_tags(self):
        """This tag's name plus tags from any nested refs."""
        tags_in_title = [o.get_tags() for o in self._title]
        tags_in_title = list(set(reduce(lambda x,y: x+y, tags_in_title)))
        return [self.title] + tags_in_title
    def get_contents(self):
        """Flat list of content items making up the tag name."""
        items = []
        for item in self._title:
            items += item.get_contents()
        return items
    def to_string(self):
        """Serialize back to '#tag', preferring the original text."""
        if self.string:
            return self.string
        return "#"+self.title
    def to_html(self, *arg, **kwargs):
        """Render as a '#tag' span."""
        return \
            f'<span data-tag="{html.escape(self.title)}" '\
            f'class="rm-page-ref rm-page-ref-tag">#{html.escape(self.title)}</span>'
    @classmethod
    def create_pattern(cls, string):
        """Regex matching bare '#word' tags plus any '#[[...]]' refs in `string`."""
        pats = ["#[\w\-_@\.]+"]
        page_ref_pat = PageRef.create_pattern(string)
        if page_ref_pat:
            pats += ["#"+pat for pat in re.split(RE_SPLIT_OR, page_ref_pat)]
        return "|".join(pats)
    def __eq__(self, other):
        return type(self)==type(other) and self.title == other.title
class BlockRef(BlockContentItem):
    """A reference to another block by its 9-character uid: ((uid))."""

    def __init__(self, uid, roam_db=None, string=None):
        """`roam_db` (optional) is used to resolve the uid to a block."""
        self.uid = uid
        self.roam_db = roam_db
        self.string = string

    @classmethod
    def from_string(cls, string, *args, **kwargs):
        """Parse '((uid))' markup; `roam_db` may be passed via kwargs."""
        super().from_string(string)
        return cls(string[2:-2], roam_db=kwargs.get("roam_db", None), string=string)

    def to_string(self, expand=False):
        """Serialize; with expand=True, substitute the referenced block's text."""
        if expand:
            referenced = self.get_referenced_block()
            if referenced:
                return referenced.to_string()
        if self.string:
            return self.string
        return f"(({self.uid}))"

    def to_html(self, *arg, **kwargs):
        """Render the referenced block's HTML, or the escaped raw reference."""
        referenced = self.get_referenced_block()
        if referenced:
            text = referenced.to_html()
        else:
            text = html.escape(self.to_string())
        return '<div class="rm-block-ref"><span>%s</span></div>' % text

    def get_tags(self):
        """Block refs never contribute tags themselves."""
        return []

    @classmethod
    def create_pattern(cls, string=None):
        """Regex matching a 9-character block uid in double parens."""
        return "\(\([\w\d\-_]{9}\)\)"

    def get_referenced_block(self):
        """Look up the referenced block, or None when no roam_db is attached."""
        if self.roam_db:
            return self.roam_db.query_by_uid(self.uid)
        return None

    def __eq__(self, other):
        """Equal when both reference the same uid."""
        return type(self) == type(other) and self.uid == other.uid
class Url(BlockContentItem):
    """A bare URL appearing in block text."""

    def __init__(self, text):
        """`text` is the URL itself."""
        self.text = text

    @classmethod
    def from_string(cls, string, **kwargs):
        """Wrap a URL string in a Url item."""
        super().from_string(string)
        return cls(string)

    def to_string(self):
        """Serialize back to the raw URL."""
        return self.text

    def to_html(self, *arg, **kwargs):
        """Render as a clickable, escaped link."""
        escaped = html.escape(self.text)
        return f'<span><a href="{escaped}">{escaped}</a></span>'

    def __eq__(self, other):
        """Equal when both are Urls with the same text."""
        if type(self) != type(other):
            return False
        return self.text == other.text
class String(BlockContentItem):
    """A run of plain text with no special Roam markup."""

    def __init__(self, string):
        # Unwrap a String passed instead of a raw str. This was previously
        # written as `string == string.to_string()` -- a no-op comparison --
        # so the String object itself (not its text) ended up stored.
        if type(string) == String:
            string = string.to_string()
        self.string = string

    @classmethod
    def from_string(cls, string, validate=True, **kwargs):
        """Wrap raw text in a String item."""
        super().from_string(string, validate)
        return cls(string)

    @classmethod
    def validate_string(cls, string):
        """Any text is a valid String."""
        return True

    def to_html(self, *arg, **kwargs):
        """Escape for HTML and convert newlines to <br>."""
        return html.escape(self.to_string()).replace("\n", "<br>")

    def get_tags(self):
        """Plain text never contributes tags."""
        return []

    def to_string(self):
        """Return the underlying text."""
        return self.string

    def __eq__(self, other):
        """Equal when both are Strings with identical text."""
        return type(self)==type(other) and self.string==other.string
class Attribute(BlockContentItem):
    """A Roam attribute: a block starting with 'Title::'."""

    def __init__(self, title, string=None):
        """`title` is the attribute name, without the trailing '::'."""
        self.title = title
        self.string = string

    @classmethod
    def from_string(cls, string, validate=True, **kwargs):
        """Parse 'Title::' markup (the trailing '::' is stripped)."""
        super().from_string(string, validate)
        return cls(string[:-2], string)

    @classmethod
    def validate_string(cls, string):
        """True when the whole string is a well-formed attribute."""
        pat = re.compile(cls.create_pattern(string)+"$")
        return bool(re.match(pat, string))

    @classmethod
    def create_pattern(cls, string=None):
        """Regex: text with no '::' run, terminated by '::'."""
        return "^(?:(?<!:)[^:])+::"

    def to_html(self, *arg, **kwargs):
        """Render as a bold 'Title:' span."""
        return '<span><strong>%s:</strong></span>' % html.escape(self.title)

    def get_tags(self):
        """An attribute implicitly tags its own title."""
        return [self.title]

    def to_string(self):
        """Serialize back to Roam markup, preferring the original text."""
        if self.string:
            return self.string
        return self.title+"::"

    def __eq__(self, other):
        """Equal when both are Attributes with the same title."""
        return type(self) == type(other) and self.title == other.title
| true | true |
f71ed1eb7998078b776859f34f0555bc09cda199 | 712 | py | Python | soluciones/empresa_pagos/main.py | carlosviveros/Soluciones | 115f4fa929c7854ca497e4c994352adc64565456 | [
"MIT"
] | 4 | 2021-12-14T23:51:25.000Z | 2022-03-24T11:14:00.000Z | soluciones/empresa_pagos/main.py | leugimkm/Soluciones | d71601c8d9b5e86e926f48d9e49462af8a956b6d | [
"MIT"
] | null | null | null | soluciones/empresa_pagos/main.py | leugimkm/Soluciones | d71601c8d9b5e86e926f48d9e49462af8a956b6d | [
"MIT"
] | 5 | 2021-11-10T06:49:50.000Z | 2022-03-24T01:42:28.000Z | """AyudaEnPython: https://www.facebook.com/groups/ayudapython
"""
# pip install prototools
from prototools import Menu
from prototools.colorize import magenta, cyan, red
from empresa import Empresa
def main():
    """Build and run the payroll menu.

    Wires three `Empresa` actions (add / report / delete workers) into a
    prototools `Menu`, applies visual settings, and starts the
    interactive loop (blocks until the user exits).
    """
    empresa = Empresa()
    menu = Menu(
        cyan("Menu"),
        exit_option_text=red("Salir"),
        exit_option_color=red,
    )
    menu.add_options(
        ("Agregar trabajadores", empresa.agregar),
        ("Mostrar Reporte", empresa.reporte),
        ("Eliminar trabajadores", empresa.eliminar),
    )
    menu.settings(
        style="double",
        color=magenta,
        options_color=cyan,
        header_bottom=True,
    )
    menu.run()
if __name__ == "__main__":
main() | 22.25 | 61 | 0.630618 |
from prototools import Menu
from prototools.colorize import magenta, cyan, red
from empresa import Empresa
def main():
empresa = Empresa()
menu = Menu(
cyan("Menu"),
exit_option_text=red("Salir"),
exit_option_color=red,
)
menu.add_options(
("Agregar trabajadores", empresa.agregar),
("Mostrar Reporte", empresa.reporte),
("Eliminar trabajadores", empresa.eliminar),
)
menu.settings(
style="double",
color=magenta,
options_color=cyan,
header_bottom=True,
)
menu.run()
if __name__ == "__main__":
main() | true | true |
f71ed277dd99ea3ca81a3df1f5f8136a8aeffc8e | 5,023 | py | Python | proteinhmmvisualize.py | mitenjain/protpore | 06b779473c4bf9f9c8c4305aa08873ae75386886 | [
"MIT"
] | null | null | null | proteinhmmvisualize.py | mitenjain/protpore | 06b779473c4bf9f9c8c4305aa08873ae75386886 | [
"MIT"
] | null | null | null | proteinhmmvisualize.py | mitenjain/protpore | 06b779473c4bf9f9c8c4305aa08873ae75386886 | [
"MIT"
] | null | null | null | '''
Author: Hannah Meyers
This file contains the experiment code for attempting to model
protein nanopore traces via HMMs. Please see inline comments
for an explanation of what each piece of the code is doing.
'''
from __future__ import print_function
from PyPore.parsers import *
from PyPore.DataTypes import *
from hmm import *
from yahmm import *
import math
import matplotlib.pyplot as plt
import itertools as it
import glob
import seaborn as sns
import sys
import pandas as pd
from proteinDists import *
from scipy.stats import kde
#Experiment data files. The first set before the break are all experiment files from
#the same day of data collection. Files after the break are each from different days.
#NOTE(review): this script relies on Python 2 itertools (it.izip / it.imap below)
#despite importing print_function; it will not run unmodified on Python 3.
#NOTE(review): `np` is never imported by name here -- presumably provided by one
#of the star imports above; confirm.
filenames = [
    #"ProteinFiles/12907001-s05.abf"
    #"ProteinFiles/13311001-s05.abf"
    "experiment_data/13n25010-s05.abf",
    #"experiment_data/13n25001-s05.abf",
    #"experiment_data/13n25005-s05.abf",
    #"experiment_data/13n25007-s05.abf",
    #"experiment_data/13n25012-s05.abf",#bad
    #----#
    #"experiment_data/13n12001-s05.abf",
    #"experiment_data/13n13006-s05.abf",
    #"experiment_data/14131001-s05.abf",
    #---#
    #"experiment_data/14410016-s05.abf"
]
#Inserts are uniform across the range of current we expect to see in an event
insert1 = MultivariateDistribution( [ UniformDistribution( 0, 40 ), UniformDistribution( 0, 10 ) ] )
#Create first local model: per-position means/stds of the ClpX profile
profile_means = pd.read_csv( 'profile_data/profilemeans.csv' )
profile_stds = pd.read_csv( 'profile_data/profilestds.csv' )
#Convert CSV data to distribution objects (one per profile column)
dists_means = [ NormalDistribution( profile_means[col].mean(), profile_means[col].std() ) for col in profile_means ]
dists_stds = [ LogNormalDistribution( np.log( profile_stds[col] ).mean(), np.log( profile_stds[col] ).std() ) for col in profile_stds ]
#build multivariate profile with distributions of means/std deviations
profile = [ MultivariateDistribution([ mean, std ]) for mean, std in it.izip( dists_means, dists_stds ) ]
#profile[5] = MultivariateDistribution([ ExtremeValueDistribution( 20, 10 ), LogNormalDistribution( np.log(4.5), np.log(3.5) ) ])
#print(profile[5])
#list of board functions corresponds to the 11 profile positions
boardlist = [ProteinDomainBoard2]*2 +[ProteinDomainBoard]*9
#build model
model = ModularDomainProfileModel2( boardlist, profile, "ClpXProfile-{}".format( len(profile) ), insert1)
#iteration for applying model to events in filenames list and plotting
for file in it.imap( File, filenames ):
    x = 1
    print(file.filename)
    #Events must drop below this threshold
    threshold = 38
    #Keep only long events whose current stays within a sane range
    rules = [lambda event: event.duration > 1000000,
            lambda event: event.min > -5,
            lambda event: event.max < threshold]
    file.parse( lambda_event_parser( threshold=threshold, rules = rules ) )
    for event in file.events:
        event.filter()
        print(event)
        #false_positive_rate controls the number of segments that will be created by the segmenter
        event.parse( SpeedyStatSplit( min_width=5, false_positive_rate=1e-65, cutoff_freq = 2000) )
        #print(event.segments)
        #Apply HMM to event (observations are per-segment [mean, std] pairs)
        _, hidden_states = model.viterbi( np.array( [[ seg.mean, seg.std] for seg in event.segments ] ) )
        if hidden_states != None:
            #First subplot is event + segmentation
            plt.figure( figsize=(20, 8))
            plt.subplot( 311 )
            event.plot( color='cycle' )
            #Second subplot is event + HMM
            plt.subplot( 312 )
            event.plot( color='hmm', hmm=model, hidden_states=hidden_states, cmap='Set1' )
            #Final subplot is color cycle with profile means
            #this subplot is currently inaccurate as it only plots the first profile
            #furthermore, there was a bug in PyPore when I started on this that makes the color cycle
            #not match up to the HMM colors. I am unsure if the bug has been fixed since then.
            ax = plt.subplot( 313 )
            plt.imshow( [ np.arange( 0., len(profile) ) / len(profile) ], interpolation='nearest', cmap="Set1" )
            plt.grid( False )
            means = [ d.parameters[0][0].parameters[0] for d in profile ]
            for i, mean in enumerate( means ):
                plt.text( i-0.2, 0.1, str( round(mean, 1) ), fontsize=12 )
            #Output HMM state path to output.txt file
            outputtext = 'output' + str(x) + '.txt'
            f = open(outputtext, 'w')
            for i, state in enumerate( hidden_states ):
                f.write(state[1].name+"\n")
            f.close()
            #s = file.filename[16:] +'fp55s' + str(x)
            s = 'backslip' + str(x)
            #save figure with name s + counter to prevent name duplications
            plt.savefig(s)
            x += 1
    #show figure
    #plt.show()
    file.close()
| 37.207407 | 136 | 0.651005 | from __future__ import print_function
from PyPore.parsers import *
from PyPore.DataTypes import *
from hmm import *
from yahmm import *
import math
import matplotlib.pyplot as plt
import itertools as it
import glob
import seaborn as sns
import sys
import pandas as pd
from proteinDists import *
from scipy.stats import kde
filenames = [
"experiment_data/13n25010-s05.abf",
insert1 = MultivariateDistribution( [ UniformDistribution( 0, 40 ), UniformDistribution( 0, 10 ) ] )
profile_means = pd.read_csv( 'profile_data/profilemeans.csv' )
profile_stds = pd.read_csv( 'profile_data/profilestds.csv' )
dists_means = [ NormalDistribution( profile_means[col].mean(), profile_means[col].std() ) for col in profile_means ]
dists_stds = [ LogNormalDistribution( np.log( profile_stds[col] ).mean(), np.log( profile_stds[col] ).std() ) for col in profile_stds ]
profile = [ MultivariateDistribution([ mean, std ]) for mean, std in it.izip( dists_means, dists_stds ) ]
boardlist = [ProteinDomainBoard2]*2 +[ProteinDomainBoard]*9
model = ModularDomainProfileModel2( boardlist, profile, "ClpXProfile-{}".format( len(profile) ), insert1)
for file in it.imap( File, filenames ):
x = 1
print(file.filename)
threshold = 38
rules = [lambda event: event.duration > 1000000,
lambda event: event.min > -5,
lambda event: event.max < threshold]
file.parse( lambda_event_parser( threshold=threshold, rules = rules ) )
for event in file.events:
event.filter()
print(event)
event.parse( SpeedyStatSplit( min_width=5, false_positive_rate=1e-65, cutoff_freq = 2000) )
_, hidden_states = model.viterbi( np.array( [[ seg.mean, seg.std] for seg in event.segments ] ) )
if hidden_states != None:
plt.figure( figsize=(20, 8))
plt.subplot( 311 )
event.plot( color='cycle' )
plt.subplot( 312 )
event.plot( color='hmm', hmm=model, hidden_states=hidden_states, cmap='Set1' )
ax = plt.subplot( 313 )
plt.imshow( [ np.arange( 0., len(profile) ) / len(profile) ], interpolation='nearest', cmap="Set1" )
plt.grid( False )
means = [ d.parameters[0][0].parameters[0] for d in profile ]
for i, mean in enumerate( means ):
plt.text( i-0.2, 0.1, str( round(mean, 1) ), fontsize=12 )
outputtext = 'output' + str(x) + '.txt'
f = open(outputtext, 'w')
for i, state in enumerate( hidden_states ):
f.write(state[1].name+"\n")
f.close()
s = 'backslip' + str(x)
plt.savefig(s)
x += 1
file.close()
| true | true |
f71ed2a62255fdb093417848fcf1c4f4c52ebe9f | 1,637 | py | Python | kelbyapp/settings.py | metatroid/kelbyapp | 6d575f6aac38832e52f520d3a7f072f43c746670 | [
"BSD-3-Clause"
] | null | null | null | kelbyapp/settings.py | metatroid/kelbyapp | 6d575f6aac38832e52f520d3a7f072f43c746670 | [
"BSD-3-Clause"
] | null | null | null | kelbyapp/settings.py | metatroid/kelbyapp | 6d575f6aac38832e52f520d3a7f072f43c746670 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Application configuration."""
import os
class Config(object):
    """Base configuration shared by all environments.

    Environment-specific classes (``ProdConfig``, ``DevConfig``,
    ``TestConfig``) subclass this and override what differs.
    """
    # Prefer the secret from the environment; fall back to the previous
    # hard-coded value so existing deployments keep working.
    # NOTE(review): rotate this fallback before any production use.
    SECRET_KEY = os.environ.get(
        'KELBYAPP_SECRET',
        "uidhbao7dbaudw8yebqnmrbqiyrnqxurgqhrqeioryq89894873264234bvm234l234pu2347",
    )
    APP_DIR = os.path.abspath(os.path.dirname(__file__))  # This directory
    PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
    BCRYPT_LOG_ROUNDS = 13  # bcrypt work factor for password hashing
    ASSETS_DEBUG = False
    DEBUG_TB_ENABLED = False  # Disable Debug toolbar
    DEBUG_TB_INTERCEPT_REDIRECTS = False
    CACHE_TYPE = 'simple'  # Can be "memcached", "redis", etc.
    SQLALCHEMY_TRACK_MODIFICATIONS = False
class ProdConfig(Config):
    """Production configuration.

    Overrides the base ``Config`` with production settings: debug off,
    debug toolbar off, PostgreSQL database URI.
    """
    ENV = 'prod'
    DEBUG = False
    # Placeholder DSN — must be replaced per deployment.
    SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/example'  # TODO: Change me
    DEBUG_TB_ENABLED = False  # Disable Debug toolbar
class DevConfig(Config):
    """Development configuration: local SQLite file, debug aids enabled."""
    ENV = 'dev'
    DEBUG = True
    DB_NAME = 'dev.db'
    # Keep the SQLite database file in the project root.
    DB_PATH = os.path.join(Config.PROJECT_ROOT, DB_NAME)
    SQLALCHEMY_DATABASE_URI = 'sqlite:///' + DB_PATH
    DEBUG_TB_ENABLED = True
    # Serve assets unbundled/unminified so they are easier to debug.
    ASSETS_DEBUG = True
    CACHE_TYPE = 'simple'  # Alternatives: "memcached", "redis", etc.
class TestConfig(Config):
    """Configuration used by the automated test suite."""
    TESTING = True
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = 'sqlite://'  # in-memory database
    # bcrypt requires at least 4 rounds ("ValueError: Invalid rounds");
    # keep it minimal so password hashing in tests stays fast.
    BCRYPT_LOG_ROUNDS = 4
    WTF_CSRF_ENABLED = False  # allow form submissions in tests
| 32.098039 | 160 | 0.695174 |
import os
class Config(object):
SECRET_KEY = "uidhbao7dbaudw8yebqnmrbqiyrnqxurgqhrqeioryq89894873264234bvm234l234pu2347" .path.abspath(os.path.dirname(__file__))
PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
BCRYPT_LOG_ROUNDS = 13
ASSETS_DEBUG = False
DEBUG_TB_ENABLED = False
DEBUG_TB_INTERCEPT_REDIRECTS = False
CACHE_TYPE = 'simple'
SQLALCHEMY_TRACK_MODIFICATIONS = False
class ProdConfig(Config):
ENV = 'prod'
DEBUG = False
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/example'
DEBUG_TB_ENABLED = False
class DevConfig(Config):
ENV = 'dev'
DEBUG = True
DB_NAME = 'dev.db'
DB_PATH = os.path.join(Config.PROJECT_ROOT, DB_NAME)
SQLALCHEMY_DATABASE_URI = 'sqlite:///{0}'.format(DB_PATH)
DEBUG_TB_ENABLED = True
ASSETS_DEBUG = True
CACHE_TYPE = 'simple' # Can be "memcached", "redis", etc.
class TestConfig(Config):
TESTING = True
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite://'
BCRYPT_LOG_ROUNDS = 4 # For faster tests; needs at least 4 to avoid "ValueError: Invalid rounds"
WTF_CSRF_ENABLED = False # Allows form testing
| true | true |
f71ed2c3f4f2ed6f53d55f48bf58882fc442030c | 214 | py | Python | py2cytoscape_doc/py2cytoscape/data/util_style.py | idekerlab/cyrest-examples | 8146c40f280d996fe14d0717038012f2507aba4e | [
"MIT"
] | 1 | 2020-09-07T00:32:12.000Z | 2020-09-07T00:32:12.000Z | py2cytoscape_doc/py2cytoscape/data/util_style.py | idekerlab/cyrest-examples | 8146c40f280d996fe14d0717038012f2507aba4e | [
"MIT"
] | null | null | null | py2cytoscape_doc/py2cytoscape/data/util_style.py | idekerlab/cyrest-examples | 8146c40f280d996fe14d0717038012f2507aba4e | [
"MIT"
] | 5 | 2018-05-31T16:55:31.000Z | 2021-01-19T07:00:00.000Z | # -*- coding: utf-8 -*-
"""
"""
class StyleUtil(object):
    """Static helpers for building visual-style mappings.

    NOTE(review): ``create_gradient`` is currently an unimplemented stub.
    """
    @staticmethod
    def create_gradient(column, vp=None):
        """Create a gradient mapping (stub — body is ``pass``).

        :param column: data column the gradient should be keyed on —
            TODO confirm intended semantics once implemented.
        :param vp: optional visual property to map the gradient onto —
            TODO confirm.
        """
        pass
| 13.375 | 41 | 0.439252 |
class StyleUtil(object):
@staticmethod
def create_gradient(column, vp=None):
pass
| true | true |
f71ed36d488977b309b3e7baf242d2d9d98bcc2e | 4,465 | py | Python | profiles_api/views.py | Ankitsingh24/Project-REST-API | 4aac3e819e2aee49a117c79d2f151b477d75d7eb | [
"MIT"
] | null | null | null | profiles_api/views.py | Ankitsingh24/Project-REST-API | 4aac3e819e2aee49a117c79d2f151b477d75d7eb | [
"MIT"
] | null | null | null | profiles_api/views.py | Ankitsingh24/Project-REST-API | 4aac3e819e2aee49a117c79d2f151b477d75d7eb | [
"MIT"
] | null | null | null | from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication
from rest_framework import filters
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.permissions import IsAuthenticated
from profiles_api import serializers
from profiles_api import models
from profiles_api import permissions
class HelloApiView(APIView):
    """Demonstration APIView exposing every HTTP verb."""

    serializer_class = serializers.HelloSerializer

    def get(self, request, format=None):
        """Return a list of APIView features."""
        apiview_features = [
            'Uses HTTP methods as functions (get, post, patch, put, delete)',
            'Is similar to a traditional Django View',
            'Gives you the most control over your logic',
            'Is mapped manually to URLs',
        ]
        return Response({'message': 'Hello!', 'an_apiview': apiview_features})

    def post(self, request):
        """Create a hello message containing the submitted name."""
        serializer = self.serializer_class(data=request.data)
        # Guard clause: reject invalid payloads with the field errors.
        if not serializer.is_valid():
            return Response(
                serializer.errors,
                status=status.HTTP_400_BAD_REQUEST
            )
        name = serializer.validated_data.get('name')
        return Response({'message': f'Hello {name}!'})

    def put(self, request, pk=None):
        """Handle a full update of an object."""
        return Response({'method': 'PUT'})

    def patch(self, request, pk=None):
        """Handle a partial update of an object."""
        return Response({'method': 'PATCH'})

    def delete(self, request, pk=None):
        """Handle deletion of an object."""
        return Response({'method': 'DELETE'})
class HelloViewSet(viewsets.ViewSet):
    """Demonstration ViewSet covering every standard action."""

    serializer_class = serializers.HelloSerializer

    def list(self, request):
        """Return a hello message."""
        viewset_features = [
            'Uses actions (list, create, retrieve, update, partial_update)',
            'Automatically maps to URLS using Routers',
            'Provides more functionality with less code',
        ]
        return Response({'message': 'Hello!', 'a_viewset': viewset_features})

    def create(self, request):
        """Create a new hello message from the posted name."""
        serializer = self.serializer_class(data=request.data)
        # Guard clause: invalid input short-circuits with a 400 response.
        if not serializer.is_valid():
            return Response(
                serializer.errors,
                status=status.HTTP_400_BAD_REQUEST
            )
        name = serializer.validated_data.get('name')
        return Response({'message': f'Hello {name}!'})

    def retrieve(self, request, pk=None):
        """Fetch a single object by its ID."""
        return Response({'http_method': 'GET'})

    def update(self, request, pk=None):
        """Replace an object."""
        return Response({'http_method': 'PUT'})

    def partial_update(self, request, pk=None):
        """Update part of an object."""
        return Response({'http_method': 'PATCH'})

    def destroy(self, request, pk=None):
        """Remove an object."""
        return Response({'http_method': 'DELETE'})
class UserProfileViewSet(viewsets.ModelViewSet):
    """Handle creating and updating profiles."""
    serializer_class = serializers.UserProfileSerializer
    queryset = models.UserProfile.objects.all()
    # Token-based auth; UpdateOwnProfile presumably limits writes to the
    # profile's owner — see profiles_api.permissions.
    authentication_classes = (TokenAuthentication,)
    permission_classes = (permissions.UpdateOwnProfile,)
    # Enable ?search= lookups against name and email.
    filter_backends = (filters.SearchFilter,)
    search_fields = ('name', 'email',)
class UserLoginApiView(ObtainAuthToken):
    """Handle creating user authentication tokens."""
    # NOTE(review): ObtainAuthToken does not set renderer_classes itself;
    # supplying the project defaults here presumably enables the browsable
    # API for this endpoint — confirm against DRF settings.
    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class UserProfileFeedViewSet(viewsets.ModelViewSet):
    """Handles creating, reading and updating profile feed items."""
    authentication_classes = (TokenAuthentication,)
    serializer_class = serializers.ProfileFeedItemSerializer
    queryset = models.ProfileFeedItem.objects.all()
    # Writes require an authenticated user; UpdateOwnStatus presumably
    # restricts edits to the item's owner — see profiles_api.permissions.
    permission_classes = (permissions.UpdateOwnStatus, IsAuthenticated)

    def perform_create(self, serializer):
        """Sets the user profile to the logged in user.

        Called by ModelViewSet on create; injecting the authenticated user
        here prevents clients from creating items on another user's behalf.
        """
        serializer.save(user_profile=self.request.user)
| 33.320896 | 77 | 0.664726 | from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication
from rest_framework import filters
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.permissions import IsAuthenticated
from profiles_api import serializers
from profiles_api import models
from profiles_api import permissions
class HelloApiView(APIView):
serializer_class = serializers.HelloSerializer
def get(self, request, format=None):
an_apiview = [
'Uses HTTP methods as functions (get, post, patch, put, delete)',
'Is similar to a traditional Django View',
'Gives you the most control over your logic',
'Is mapped manually to URLs',
]
return Response({'message': 'Hello!', 'an_apiview': an_apiview})
def post(self, request):
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}!'
return Response({'message': message})
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
def put(self, request, pk=None):
return Response({'method': 'PUT'})
def patch(self, request, pk=None):
return Response({'method': 'PATCH'})
def delete(self, request, pk=None):
return Response({'method': 'DELETE'})
class HelloViewSet(viewsets.ViewSet):
serializer_class = serializers.HelloSerializer
def list(self, request):
a_viewset = [
'Uses actions (list, create, retrieve, update, partial_update)',
'Automatically maps to URLS using Routers',
'Provides more functionality with less code',
]
return Response({'message': 'Hello!', 'a_viewset': a_viewset})
def create(self, request):
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}!'
return Response({'message': message})
else:
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
def retrieve(self, request, pk=None):
return Response({'http_method': 'GET'})
def update(self, request, pk=None):
return Response({'http_method': 'PUT'})
def partial_update(self, request, pk=None):
return Response({'http_method': 'PATCH'})
def destroy(self, request, pk=None):
return Response({'http_method': 'DELETE'})
class UserProfileViewSet(viewsets.ModelViewSet):
serializer_class = serializers.UserProfileSerializer
queryset = models.UserProfile.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (permissions.UpdateOwnProfile,)
filter_backends = (filters.SearchFilter,)
search_fields = ('name', 'email',)
class UserLoginApiView(ObtainAuthToken):
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class UserProfileFeedViewSet(viewsets.ModelViewSet):
authentication_classes = (TokenAuthentication,)
serializer_class = serializers.ProfileFeedItemSerializer
queryset = models.ProfileFeedItem.objects.all()
permission_classes = (permissions.UpdateOwnStatus, IsAuthenticated)
def perform_create(self, serializer):
serializer.save(user_profile=self.request.user)
| true | true |
f71ed394f273d02af2d30bdc28d867676cfc930d | 15,202 | py | Python | geopandas/tools/tests/test_clip.py | cloudscenes/geopandas | 409d8f0a1562df088ce28c39a48fe4df669660fe | [
"BSD-3-Clause"
] | 2,914 | 2015-01-01T14:27:43.000Z | 2022-03-31T22:26:39.000Z | geopandas/tools/tests/test_clip.py | cloudscenes/geopandas | 409d8f0a1562df088ce28c39a48fe4df669660fe | [
"BSD-3-Clause"
] | 2,040 | 2015-01-16T11:34:26.000Z | 2022-03-31T12:13:39.000Z | geopandas/tools/tests/test_clip.py | cloudscenes/geopandas | 409d8f0a1562df088ce28c39a48fe4df669660fe | [
"BSD-3-Clause"
] | 758 | 2015-01-21T20:23:32.000Z | 2022-03-31T17:22:53.000Z | """Tests for the clip module."""
import warnings
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import shapely
from shapely.geometry import (
Polygon,
Point,
LineString,
LinearRing,
GeometryCollection,
MultiPoint,
)
import geopandas
from geopandas import GeoDataFrame, GeoSeries, clip
from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal
import pytest
pytestmark = pytest.mark.skip_no_sindex
pandas_133 = pd.__version__ == LooseVersion("1.3.3")
@pytest.fixture
def point_gdf():
    """Return a GeoDataFrame of four points in EPSG:3857."""
    coords = [(2, 2), (3, 4), (9, 8), (-12, -15)]
    geoms = [Point(x, y) for x, y in coords]
    return GeoDataFrame(geoms, columns=["geometry"], crs="EPSG:3857")
@pytest.fixture
def pointsoutside_nooverlap_gdf():
"""Create a point GeoDataFrame. Its points are all outside the single
rectangle, and its bounds are outside the single rectangle's."""
pts = np.array([[5, 15], [15, 15], [15, 20]])
gdf = GeoDataFrame([Point(xy) for xy in pts], columns=["geometry"], crs="EPSG:3857")
return gdf
@pytest.fixture
def pointsoutside_overlap_gdf():
"""Create a point GeoDataFrame. Its points are all outside the single
rectangle, and its bounds are overlapping the single rectangle's."""
pts = np.array([[5, 15], [15, 15], [15, 5]])
gdf = GeoDataFrame([Point(xy) for xy in pts], columns=["geometry"], crs="EPSG:3857")
return gdf
@pytest.fixture
def single_rectangle_gdf():
    """Return a 10x10 square clip polygon carrying an attribute column."""
    square = Polygon([(0, 0), (0, 10), (10, 10), (10, 0), (0, 0)])
    frame = GeoDataFrame([1], geometry=[square], crs="EPSG:3857")
    frame["attr2"] = "site-boundary"
    return frame
@pytest.fixture
def larger_single_rectangle_gdf():
"""Create a slightly larger rectangle for clipping.
The smaller single rectangle is used to test the edge case where slivers
are returned when you clip polygons. This fixture is larger which
eliminates the slivers in the clip return.
"""
poly_inters = Polygon([(-5, -5), (-5, 15), (15, 15), (15, -5), (-5, -5)])
gdf = GeoDataFrame([1], geometry=[poly_inters], crs="EPSG:3857")
gdf["attr2"] = ["study area"]
return gdf
@pytest.fixture
def buffered_locations(point_gdf):
"""Buffer points to create a multi-polygon."""
buffered_locs = point_gdf
buffered_locs["geometry"] = buffered_locs.buffer(4)
buffered_locs["type"] = "plot"
return buffered_locs
@pytest.fixture
def donut_geometry(buffered_locations, single_rectangle_gdf):
"""Make a geometry with a hole in the middle (a donut)."""
donut = geopandas.overlay(
buffered_locations, single_rectangle_gdf, how="symmetric_difference"
)
return donut
@pytest.fixture
def two_line_gdf():
    """Return a GeoDataFrame holding two LineString features."""
    first = LineString([(1, 1), (2, 2), (3, 2), (5, 3)])
    second = LineString([(3, 4), (5, 7), (12, 2), (10, 5), (9, 7.5)])
    return GeoDataFrame([1, 2], geometry=[first, second], crs="EPSG:3857")
@pytest.fixture
def multi_poly_gdf(donut_geometry):
"""Create a multi-polygon GeoDataFrame."""
multi_poly = donut_geometry.unary_union
out_df = GeoDataFrame(geometry=GeoSeries(multi_poly), crs="EPSG:3857")
out_df["attr"] = ["pool"]
return out_df
@pytest.fixture
def multi_line(two_line_gdf):
"""Create a multi-line GeoDataFrame.
This GDF has one multiline and one regular line."""
# Create a single and multi line object
multiline_feat = two_line_gdf.unary_union
linec = LineString([(2, 1), (3, 1), (4, 1), (5, 2)])
out_df = GeoDataFrame(geometry=GeoSeries([multiline_feat, linec]), crs="EPSG:3857")
out_df["attr"] = ["road", "stream"]
return out_df
@pytest.fixture
def multi_point(point_gdf):
"""Create a multi-point GeoDataFrame."""
multi_point = point_gdf.unary_union
out_df = GeoDataFrame(
geometry=GeoSeries(
[multi_point, Point(2, 5), Point(-11, -14), Point(-10, -12)]
),
crs="EPSG:3857",
)
out_df["attr"] = ["tree", "another tree", "shrub", "berries"]
return out_df
@pytest.fixture
def mixed_gdf():
    """Return a GeoDataFrame mixing Point, Polygon, LineString and ring."""
    # Geometry order matters for the tests: point, polygon, line, ring.
    geoms = [
        Point([(2, 3), (11, 4), (7, 2), (8, 9), (1, 13)]),
        Polygon([(3, 4), (5, 2), (12, 2), (10, 5), (9, 7.5)]),
        LineString([(1, 1), (2, 2), (3, 2), (5, 3), (12, 1)]),
        LinearRing([(1, 1), (2, 2), (3, 2), (5, 3), (12, 1)]),
    ]
    return GeoDataFrame([1, 2, 3, 4], geometry=geoms, crs="EPSG:3857")
@pytest.fixture
def geomcol_gdf():
"""Create a Mixed Polygon and LineString For Testing"""
point = Point([(2, 3), (11, 4), (7, 2), (8, 9), (1, 13)])
poly = Polygon([(3, 4), (5, 2), (12, 2), (10, 5), (9, 7.5)])
coll = GeometryCollection([point, poly])
gdf = GeoDataFrame([1], geometry=[coll], crs="EPSG:3857")
return gdf
@pytest.fixture
def sliver_line():
"""Create a line that will create a point when clipped."""
linea = LineString([(10, 5), (13, 5), (15, 5)])
lineb = LineString([(1, 1), (2, 2), (3, 2), (5, 3), (12, 1)])
gdf = GeoDataFrame([1, 2], geometry=[linea, lineb], crs="EPSG:3857")
return gdf
def test_not_gdf(single_rectangle_gdf):
"""Non-GeoDataFrame inputs raise attribute errors."""
with pytest.raises(TypeError):
clip((2, 3), single_rectangle_gdf)
with pytest.raises(TypeError):
clip(single_rectangle_gdf, (2, 3))
def test_returns_gdf(point_gdf, single_rectangle_gdf):
"""Test that function returns a GeoDataFrame (or GDF-like) object."""
out = clip(point_gdf, single_rectangle_gdf)
assert isinstance(out, GeoDataFrame)
def test_returns_series(point_gdf, single_rectangle_gdf):
"""Test that function returns a GeoSeries if GeoSeries is passed."""
out = clip(point_gdf.geometry, single_rectangle_gdf)
assert isinstance(out, GeoSeries)
def test_non_overlapping_geoms():
    """Clipping with a fully disjoint mask yields an empty result."""
    unit_box = Polygon([(0, 0), (0, 1), (1, 1), (1, 0), (0, 0)])
    unit_gdf = GeoDataFrame([1], geometry=[unit_box], crs="EPSG:3857")
    # Shift the geometry far away so the extents cannot overlap.
    shifted = unit_gdf.geometry.apply(
        lambda geom: shapely.affinity.translate(geom, xoff=20)
    )
    assert_geodataframe_equal(clip(unit_gdf, shifted), unit_gdf.iloc[:0])
    assert_geoseries_equal(
        clip(unit_gdf.geometry, shifted), GeoSeries(crs=unit_gdf.crs)
    )
def test_clip_points(point_gdf, single_rectangle_gdf):
    """Clipping points keeps exactly those inside the polygon."""
    result = clip(point_gdf, single_rectangle_gdf)
    expected_geoms = [Point(2, 2), Point(3, 4), Point(9, 8)]
    expected = GeoDataFrame(
        expected_geoms, columns=["geometry"], crs="EPSG:3857"
    )
    assert_geodataframe_equal(result, expected)
def test_clip_points_geom_col_rename(point_gdf, single_rectangle_gdf):
"""Test clipping a points GDF with a generic polygon geometry."""
point_gdf_geom_col_rename = point_gdf.rename_geometry("geometry2")
clip_pts = clip(point_gdf_geom_col_rename, single_rectangle_gdf)
pts = np.array([[2, 2], [3, 4], [9, 8]])
exp = GeoDataFrame(
[Point(xy) for xy in pts],
columns=["geometry2"],
crs="EPSG:3857",
geometry="geometry2",
)
assert_geodataframe_equal(clip_pts, exp)
def test_clip_poly(buffered_locations, single_rectangle_gdf):
"""Test clipping a polygon GDF with a generic polygon geometry."""
clipped_poly = clip(buffered_locations, single_rectangle_gdf)
assert len(clipped_poly.geometry) == 3
assert all(clipped_poly.geom_type == "Polygon")
def test_clip_poly_geom_col_rename(buffered_locations, single_rectangle_gdf):
"""Test clipping a polygon GDF with a generic polygon geometry."""
poly_gdf_geom_col_rename = buffered_locations.rename_geometry("geometry2")
clipped_poly = clip(poly_gdf_geom_col_rename, single_rectangle_gdf)
assert len(clipped_poly.geometry) == 3
assert "geometry" not in clipped_poly.keys()
assert "geometry2" in clipped_poly.keys()
def test_clip_poly_series(buffered_locations, single_rectangle_gdf):
"""Test clipping a polygon GDF with a generic polygon geometry."""
clipped_poly = clip(buffered_locations.geometry, single_rectangle_gdf)
assert len(clipped_poly) == 3
assert all(clipped_poly.geom_type == "Polygon")
@pytest.mark.xfail(pandas_133, reason="Regression in pandas 1.3.3 (GH #2101)")
def test_clip_multipoly_keep_slivers(multi_poly_gdf, single_rectangle_gdf):
"""Test a multi poly object where the return includes a sliver.
Also the bounds of the object should == the bounds of the clip object
if they fully overlap (as they do in these fixtures)."""
clipped = clip(multi_poly_gdf, single_rectangle_gdf)
assert np.array_equal(clipped.total_bounds, single_rectangle_gdf.total_bounds)
# Assert returned data is a geometry collection given sliver geoms
assert "GeometryCollection" in clipped.geom_type[0]
@pytest.mark.xfail(pandas_133, reason="Regression in pandas 1.3.3 (GH #2101)")
def test_clip_multipoly_keep_geom_type(multi_poly_gdf, single_rectangle_gdf):
"""Test a multi poly object where the return includes a sliver.
Also the bounds of the object should == the bounds of the clip object
if they fully overlap (as they do in these fixtures)."""
clipped = clip(multi_poly_gdf, single_rectangle_gdf, keep_geom_type=True)
assert np.array_equal(clipped.total_bounds, single_rectangle_gdf.total_bounds)
# Assert returned data is a not geometry collection
assert (clipped.geom_type == "Polygon").any()
def test_clip_single_multipoly_no_extra_geoms(
buffered_locations, larger_single_rectangle_gdf
):
"""When clipping a multi-polygon feature, no additional geom types
should be returned."""
multi = buffered_locations.dissolve(by="type").reset_index()
clipped = clip(multi, larger_single_rectangle_gdf)
assert clipped.geom_type[0] == "Polygon"
def test_clip_multiline(multi_line, single_rectangle_gdf):
"""Test that clipping a multiline feature with a poly returns expected output."""
clipped = clip(multi_line, single_rectangle_gdf)
assert clipped.geom_type[0] == "MultiLineString"
def test_clip_multipoint(single_rectangle_gdf, multi_point):
    """Clipping a multipoint keeps only the points inside the polygon.

    The result should be a single MultiPoint feature with its attribute
    column preserved."""
    clipped = clip(multi_point, single_rectangle_gdf)
    assert clipped.geom_type[0] == "MultiPoint"
    assert hasattr(clipped, "attr")
    # Only two of the input rows intersect the clip geometry.
    assert len(clipped) == 2
    expected = MultiPoint([Point(2, 2), Point(3, 4), Point(9, 8)])
    assert clipped.iloc[0].geometry.wkt == expected.wkt
    assert all(clipped.intersects(single_rectangle_gdf.unary_union))
def test_clip_lines(two_line_gdf, single_rectangle_gdf):
"""Test what happens when you give the clip_extent a line GDF."""
clip_line = clip(two_line_gdf, single_rectangle_gdf)
assert len(clip_line.geometry) == 2
def test_clip_with_multipolygon(buffered_locations, single_rectangle_gdf):
"""Test clipping a polygon with a multipolygon."""
multi = buffered_locations.dissolve(by="type").reset_index()
clipped = clip(single_rectangle_gdf, multi)
assert clipped.geom_type[0] == "Polygon"
def test_mixed_geom(mixed_gdf, single_rectangle_gdf):
"""Test clipping a mixed GeoDataFrame"""
clipped = clip(mixed_gdf, single_rectangle_gdf)
assert (
clipped.geom_type[0] == "Point"
and clipped.geom_type[1] == "Polygon"
and clipped.geom_type[2] == "LineString"
)
def test_mixed_series(mixed_gdf, single_rectangle_gdf):
"""Test clipping a mixed GeoSeries"""
clipped = clip(mixed_gdf.geometry, single_rectangle_gdf)
assert (
clipped.geom_type[0] == "Point"
and clipped.geom_type[1] == "Polygon"
and clipped.geom_type[2] == "LineString"
)
def test_clip_warning_no_extra_geoms(buffered_locations, single_rectangle_gdf):
    """Test a user warning is provided if no new geometry types are found.

    The warning must come from ``clip`` itself.  The previous version
    called ``warnings.warn`` inside the ``pytest.warns`` block, which
    satisfied the context manager unconditionally and made the test
    impossible to fail.
    """
    with pytest.warns(UserWarning, match="keep_geom_type"):
        clip(buffered_locations, single_rectangle_gdf, True)
def test_clip_with_polygon(single_rectangle_gdf):
"""Test clip when using a shapely object"""
polygon = Polygon([(0, 0), (5, 12), (10, 0), (0, 0)])
clipped = clip(single_rectangle_gdf, polygon)
exp_poly = polygon.intersection(
Polygon([(0, 0), (0, 10), (10, 10), (10, 0), (0, 0)])
)
exp = GeoDataFrame([1], geometry=[exp_poly], crs="EPSG:3857")
exp["attr2"] = "site-boundary"
assert_geodataframe_equal(clipped, exp)
def test_clip_with_line_extra_geom(single_rectangle_gdf, sliver_line):
"""When the output of a clipped line returns a geom collection,
and keep_geom_type is True, no geometry collections should be returned."""
clipped = clip(sliver_line, single_rectangle_gdf, keep_geom_type=True)
assert len(clipped.geometry) == 1
# Assert returned data is a not geometry collection
assert not (clipped.geom_type == "GeometryCollection").any()
def test_clip_line_keep_slivers(single_rectangle_gdf, sliver_line):
"""Test the correct output if a point is returned
from a line only geometry type."""
clipped = clip(sliver_line, single_rectangle_gdf)
# Assert returned data is a geometry collection given sliver geoms
assert "Point" == clipped.geom_type[0]
assert "LineString" == clipped.geom_type[1]
def test_clip_no_box_overlap(pointsoutside_nooverlap_gdf, single_rectangle_gdf):
"""Test clip when intersection is empty and boxes do not overlap."""
clipped = clip(pointsoutside_nooverlap_gdf, single_rectangle_gdf)
assert len(clipped) == 0
def test_clip_box_overlap(pointsoutside_overlap_gdf, single_rectangle_gdf):
"""Test clip when intersection is empty and boxes do overlap."""
clipped = clip(pointsoutside_overlap_gdf, single_rectangle_gdf)
assert len(clipped) == 0
def test_warning_extra_geoms_mixed(single_rectangle_gdf, mixed_gdf):
"""Test the correct warnings are raised if keep_geom_type is
called on a mixed GDF"""
with pytest.warns(UserWarning):
clip(mixed_gdf, single_rectangle_gdf, keep_geom_type=True)
def test_warning_geomcoll(single_rectangle_gdf, geomcol_gdf):
"""Test the correct warnings are raised if keep_geom_type is
called on a GDF with GeometryCollection"""
with pytest.warns(UserWarning):
clip(geomcol_gdf, single_rectangle_gdf, keep_geom_type=True)
def test_warning_crs_mismatch(point_gdf, single_rectangle_gdf):
with pytest.warns(UserWarning, match="CRS mismatch between the CRS"):
clip(point_gdf, single_rectangle_gdf.to_crs(4326))
| 36.987835 | 88 | 0.698527 |
import warnings
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import shapely
from shapely.geometry import (
Polygon,
Point,
LineString,
LinearRing,
GeometryCollection,
MultiPoint,
)
import geopandas
from geopandas import GeoDataFrame, GeoSeries, clip
from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal
import pytest
pytestmark = pytest.mark.skip_no_sindex
pandas_133 = pd.__version__ == LooseVersion("1.3.3")
@pytest.fixture
def point_gdf():
pts = np.array([[2, 2], [3, 4], [9, 8], [-12, -15]])
gdf = GeoDataFrame([Point(xy) for xy in pts], columns=["geometry"], crs="EPSG:3857")
return gdf
@pytest.fixture
def pointsoutside_nooverlap_gdf():
pts = np.array([[5, 15], [15, 15], [15, 20]])
gdf = GeoDataFrame([Point(xy) for xy in pts], columns=["geometry"], crs="EPSG:3857")
return gdf
@pytest.fixture
def pointsoutside_overlap_gdf():
pts = np.array([[5, 15], [15, 15], [15, 5]])
gdf = GeoDataFrame([Point(xy) for xy in pts], columns=["geometry"], crs="EPSG:3857")
return gdf
@pytest.fixture
def single_rectangle_gdf():
poly_inters = Polygon([(0, 0), (0, 10), (10, 10), (10, 0), (0, 0)])
gdf = GeoDataFrame([1], geometry=[poly_inters], crs="EPSG:3857")
gdf["attr2"] = "site-boundary"
return gdf
@pytest.fixture
def larger_single_rectangle_gdf():
poly_inters = Polygon([(-5, -5), (-5, 15), (15, 15), (15, -5), (-5, -5)])
gdf = GeoDataFrame([1], geometry=[poly_inters], crs="EPSG:3857")
gdf["attr2"] = ["study area"]
return gdf
@pytest.fixture
def buffered_locations(point_gdf):
buffered_locs = point_gdf
buffered_locs["geometry"] = buffered_locs.buffer(4)
buffered_locs["type"] = "plot"
return buffered_locs
@pytest.fixture
def donut_geometry(buffered_locations, single_rectangle_gdf):
donut = geopandas.overlay(
buffered_locations, single_rectangle_gdf, how="symmetric_difference"
)
return donut
@pytest.fixture
def two_line_gdf():
linea = LineString([(1, 1), (2, 2), (3, 2), (5, 3)])
lineb = LineString([(3, 4), (5, 7), (12, 2), (10, 5), (9, 7.5)])
gdf = GeoDataFrame([1, 2], geometry=[linea, lineb], crs="EPSG:3857")
return gdf
@pytest.fixture
def multi_poly_gdf(donut_geometry):
multi_poly = donut_geometry.unary_union
out_df = GeoDataFrame(geometry=GeoSeries(multi_poly), crs="EPSG:3857")
out_df["attr"] = ["pool"]
return out_df
@pytest.fixture
def multi_line(two_line_gdf):
multiline_feat = two_line_gdf.unary_union
linec = LineString([(2, 1), (3, 1), (4, 1), (5, 2)])
out_df = GeoDataFrame(geometry=GeoSeries([multiline_feat, linec]), crs="EPSG:3857")
out_df["attr"] = ["road", "stream"]
return out_df
@pytest.fixture
def multi_point(point_gdf):
multi_point = point_gdf.unary_union
out_df = GeoDataFrame(
geometry=GeoSeries(
[multi_point, Point(2, 5), Point(-11, -14), Point(-10, -12)]
),
crs="EPSG:3857",
)
out_df["attr"] = ["tree", "another tree", "shrub", "berries"]
return out_df
@pytest.fixture
def mixed_gdf():
point = Point([(2, 3), (11, 4), (7, 2), (8, 9), (1, 13)])
line = LineString([(1, 1), (2, 2), (3, 2), (5, 3), (12, 1)])
poly = Polygon([(3, 4), (5, 2), (12, 2), (10, 5), (9, 7.5)])
ring = LinearRing([(1, 1), (2, 2), (3, 2), (5, 3), (12, 1)])
gdf = GeoDataFrame(
[1, 2, 3, 4], geometry=[point, poly, line, ring], crs="EPSG:3857"
)
return gdf
@pytest.fixture
def geomcol_gdf():
point = Point([(2, 3), (11, 4), (7, 2), (8, 9), (1, 13)])
poly = Polygon([(3, 4), (5, 2), (12, 2), (10, 5), (9, 7.5)])
coll = GeometryCollection([point, poly])
gdf = GeoDataFrame([1], geometry=[coll], crs="EPSG:3857")
return gdf
@pytest.fixture
def sliver_line():
linea = LineString([(10, 5), (13, 5), (15, 5)])
lineb = LineString([(1, 1), (2, 2), (3, 2), (5, 3), (12, 1)])
gdf = GeoDataFrame([1, 2], geometry=[linea, lineb], crs="EPSG:3857")
return gdf
def test_not_gdf(single_rectangle_gdf):
with pytest.raises(TypeError):
clip((2, 3), single_rectangle_gdf)
with pytest.raises(TypeError):
clip(single_rectangle_gdf, (2, 3))
def test_returns_gdf(point_gdf, single_rectangle_gdf):
out = clip(point_gdf, single_rectangle_gdf)
assert isinstance(out, GeoDataFrame)
def test_returns_series(point_gdf, single_rectangle_gdf):
out = clip(point_gdf.geometry, single_rectangle_gdf)
assert isinstance(out, GeoSeries)
def test_non_overlapping_geoms():
unit_box = Polygon([(0, 0), (0, 1), (1, 1), (1, 0), (0, 0)])
unit_gdf = GeoDataFrame([1], geometry=[unit_box], crs="EPSG:3857")
non_overlapping_gdf = unit_gdf.copy()
non_overlapping_gdf = non_overlapping_gdf.geometry.apply(
lambda x: shapely.affinity.translate(x, xoff=20)
)
out = clip(unit_gdf, non_overlapping_gdf)
assert_geodataframe_equal(out, unit_gdf.iloc[:0])
out2 = clip(unit_gdf.geometry, non_overlapping_gdf)
assert_geoseries_equal(out2, GeoSeries(crs=unit_gdf.crs))
def test_clip_points(point_gdf, single_rectangle_gdf):
clip_pts = clip(point_gdf, single_rectangle_gdf)
pts = np.array([[2, 2], [3, 4], [9, 8]])
exp = GeoDataFrame([Point(xy) for xy in pts], columns=["geometry"], crs="EPSG:3857")
assert_geodataframe_equal(clip_pts, exp)
def test_clip_points_geom_col_rename(point_gdf, single_rectangle_gdf):
point_gdf_geom_col_rename = point_gdf.rename_geometry("geometry2")
clip_pts = clip(point_gdf_geom_col_rename, single_rectangle_gdf)
pts = np.array([[2, 2], [3, 4], [9, 8]])
exp = GeoDataFrame(
[Point(xy) for xy in pts],
columns=["geometry2"],
crs="EPSG:3857",
geometry="geometry2",
)
assert_geodataframe_equal(clip_pts, exp)
def test_clip_poly(buffered_locations, single_rectangle_gdf):
clipped_poly = clip(buffered_locations, single_rectangle_gdf)
assert len(clipped_poly.geometry) == 3
assert all(clipped_poly.geom_type == "Polygon")
def test_clip_poly_geom_col_rename(buffered_locations, single_rectangle_gdf):
poly_gdf_geom_col_rename = buffered_locations.rename_geometry("geometry2")
clipped_poly = clip(poly_gdf_geom_col_rename, single_rectangle_gdf)
assert len(clipped_poly.geometry) == 3
assert "geometry" not in clipped_poly.keys()
assert "geometry2" in clipped_poly.keys()
def test_clip_poly_series(buffered_locations, single_rectangle_gdf):
clipped_poly = clip(buffered_locations.geometry, single_rectangle_gdf)
assert len(clipped_poly) == 3
assert all(clipped_poly.geom_type == "Polygon")
@pytest.mark.xfail(pandas_133, reason="Regression in pandas 1.3.3 (GH #2101)")
def test_clip_multipoly_keep_slivers(multi_poly_gdf, single_rectangle_gdf):
clipped = clip(multi_poly_gdf, single_rectangle_gdf)
assert np.array_equal(clipped.total_bounds, single_rectangle_gdf.total_bounds)
assert "GeometryCollection" in clipped.geom_type[0]
@pytest.mark.xfail(pandas_133, reason="Regression in pandas 1.3.3 (GH #2101)")
def test_clip_multipoly_keep_geom_type(multi_poly_gdf, single_rectangle_gdf):
    """With keep_geom_type=True only Polygon pieces remain."""
    result = clip(multi_poly_gdf, single_rectangle_gdf, keep_geom_type=True)
    assert np.array_equal(result.total_bounds, single_rectangle_gdf.total_bounds)
    assert (result.geom_type == "Polygon").any()
def test_clip_single_multipoly_no_extra_geoms(
    buffered_locations, larger_single_rectangle_gdf
):
    """Clipping a dissolved multi-polygon by a larger box yields a Polygon."""
    dissolved = buffered_locations.dissolve(by="type").reset_index()
    result = clip(dissolved, larger_single_rectangle_gdf)
    assert result.geom_type[0] == "Polygon"
def test_clip_multiline(multi_line, single_rectangle_gdf):
    """A MultiLineString stays a MultiLineString after clipping."""
    result = clip(multi_line, single_rectangle_gdf)
    assert result.geom_type[0] == "MultiLineString"
def test_clip_multipoint(single_rectangle_gdf, multi_point):
    """MultiPoint clipping keeps the type, extra columns and in-box members."""
    result = clip(multi_point, single_rectangle_gdf)
    assert result.geom_type[0] == "MultiPoint"
    assert hasattr(result, "attr")  # non-geometry column is preserved
    assert len(result) == 2
    expected_multipoint = MultiPoint([Point(2, 2), Point(3, 4), Point(9, 8)])
    assert result.iloc[0].geometry.wkt == expected_multipoint.wkt
    assert all(result.intersects(single_rectangle_gdf.unary_union))
def test_clip_lines(two_line_gdf, single_rectangle_gdf):
    """Both input lines survive clipping."""
    result = clip(two_line_gdf, single_rectangle_gdf)
    assert len(result.geometry) == 2
def test_clip_with_multipolygon(buffered_locations, single_rectangle_gdf):
    """Using a dissolved MultiPolygon as the mask yields a Polygon result."""
    mask = buffered_locations.dissolve(by="type").reset_index()
    result = clip(single_rectangle_gdf, mask)
    assert result.geom_type[0] == "Polygon"
def test_mixed_geom(mixed_gdf, single_rectangle_gdf):
    """Mixed-geometry frames keep their per-row geometry types."""
    result = clip(mixed_gdf, single_rectangle_gdf)
    assert result.geom_type[0] == "Point"
    assert result.geom_type[1] == "Polygon"
    assert result.geom_type[2] == "LineString"
def test_mixed_series(mixed_gdf, single_rectangle_gdf):
    """A mixed-geometry GeoSeries keeps its per-row geometry types."""
    result = clip(mixed_gdf.geometry, single_rectangle_gdf)
    assert result.geom_type[0] == "Point"
    assert result.geom_type[1] == "Polygon"
    assert result.geom_type[2] == "LineString"
def test_clip_warning_no_extra_geoms(buffered_locations, single_rectangle_gdf):
    """clip(..., keep_geom_type=True) warns when there is nothing to drop.

    The previous version issued its own ``warnings.warn`` inside the
    ``pytest.warns`` block, which made the check vacuous: the context manager
    always saw a UserWarning even if ``clip`` never emitted one.  Assert on
    the warning raised by ``clip`` itself instead.
    """
    with pytest.warns(UserWarning):
        clip(buffered_locations, single_rectangle_gdf, True)
def test_clip_with_polygon(single_rectangle_gdf):
    """A bare shapely Polygon is accepted as the clip mask."""
    mask = Polygon([(0, 0), (5, 12), (10, 0), (0, 0)])
    result = clip(single_rectangle_gdf, mask)
    rectangle = Polygon([(0, 0), (0, 10), (10, 10), (10, 0), (0, 0)])
    expected = GeoDataFrame(
        [1], geometry=[mask.intersection(rectangle)], crs="EPSG:3857"
    )
    expected["attr2"] = "site-boundary"
    assert_geodataframe_equal(result, expected)
def test_clip_with_line_extra_geom(single_rectangle_gdf, sliver_line):
    """keep_geom_type=True drops point slivers produced by line clipping."""
    result = clip(sliver_line, single_rectangle_gdf, keep_geom_type=True)
    assert len(result.geometry) == 1
    assert not (result.geom_type == "GeometryCollection").any()
def test_clip_line_keep_slivers(single_rectangle_gdf, sliver_line):
    """Without keep_geom_type, point slivers from line clipping are kept."""
    result = clip(sliver_line, single_rectangle_gdf)
    assert result.geom_type[0] == "Point"
    assert result.geom_type[1] == "LineString"
def test_clip_no_box_overlap(pointsoutside_nooverlap_gdf, single_rectangle_gdf):
    """Points whose bounds don't touch the mask produce an empty result."""
    result = clip(pointsoutside_nooverlap_gdf, single_rectangle_gdf)
    assert len(result) == 0
def test_clip_box_overlap(pointsoutside_overlap_gdf, single_rectangle_gdf):
    """Bounds may overlap while no geometry intersects: result is empty."""
    result = clip(pointsoutside_overlap_gdf, single_rectangle_gdf)
    assert len(result) == 0
def test_warning_extra_geoms_mixed(single_rectangle_gdf, mixed_gdf):
    """Clipping a mixed-geometry frame with keep_geom_type emits a warning."""
    with pytest.warns(UserWarning):
        clip(mixed_gdf, single_rectangle_gdf, keep_geom_type=True)
def test_warning_geomcoll(single_rectangle_gdf, geomcol_gdf):
    """GeometryCollection input with keep_geom_type emits a warning."""
    with pytest.warns(UserWarning):
        clip(geomcol_gdf, single_rectangle_gdf, keep_geom_type=True)
def test_warning_crs_mismatch(point_gdf, single_rectangle_gdf):
    """Masks in a different CRS than the frame trigger a CRS-mismatch warning."""
    with pytest.warns(UserWarning, match="CRS mismatch between the CRS"):
        clip(point_gdf, single_rectangle_gdf.to_crs(4326))
| true | true |
f71ed3c14f730535ffa1e416a7661b591a5802cb | 1,854 | py | Python | src/cpu/testers/gpu_ruby_test/GpuWavefront.py | hyu-iot/gem5 | aeccc8bd8e9a86f96fc7a6f40d978f8494337fc5 | [
"BSD-3-Clause"
] | 765 | 2015-01-14T16:17:04.000Z | 2022-03-28T07:46:28.000Z | src/cpu/testers/gpu_ruby_test/GpuWavefront.py | hyu-iot/gem5 | aeccc8bd8e9a86f96fc7a6f40d978f8494337fc5 | [
"BSD-3-Clause"
] | 30 | 2015-01-01T21:49:38.000Z | 2021-04-20T19:01:54.000Z | src/cpu/testers/gpu_ruby_test/GpuWavefront.py | hyu-iot/gem5 | aeccc8bd8e9a86f96fc7a6f40d978f8494337fc5 | [
"BSD-3-Clause"
] | 807 | 2015-01-06T09:55:38.000Z | 2022-03-30T10:23:36.000Z | # Copyright (c) 2017-2021 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from m5.params import *
from m5.proxy import *
from m5.objects.TesterThread import TesterThread
class GpuWavefront(TesterThread):
    """gem5 SimObject declaration for the GPU wavefront tester thread
    used by the Ruby GPU protocol tester (C++ class gem5::GpuWavefront)."""
    type = 'GpuWavefront'
    cxx_header = "cpu/testers/gpu_ruby_test/gpu_wavefront.hh"
    cxx_class = 'gem5::GpuWavefront'
    # Which compute unit this wavefront is mapped to.
    cu_id = Param.Int("Compute Unit ID")
| 45.219512 | 78 | 0.782093 |
from m5.params import *
from m5.proxy import *
from m5.objects.TesterThread import TesterThread
class GpuWavefront(TesterThread):
    """gem5 SimObject declaration for the GPU wavefront tester thread
    used by the Ruby GPU protocol tester (C++ class gem5::GpuWavefront)."""
    type = 'GpuWavefront'
    cxx_header = "cpu/testers/gpu_ruby_test/gpu_wavefront.hh"
    cxx_class = 'gem5::GpuWavefront'
    # Which compute unit this wavefront is mapped to.
    cu_id = Param.Int("Compute Unit ID")
| true | true |
f71ed45f67578898fc5ebcf1ebe0bb5a8cc1a6f8 | 1,990 | py | Python | Python/Plotagem_varios_dadados.py | italogfernandes/minicurso-arduino-avancado | bbdfe0f4dedf684a9f480268d161c1c145661242 | [
"MIT"
] | 1 | 2016-05-15T00:20:08.000Z | 2016-05-15T00:20:08.000Z | Python/Plotagem_varios_dadados.py | italogfernandes/minicurso-arduino-avancado | bbdfe0f4dedf684a9f480268d161c1c145661242 | [
"MIT"
] | null | null | null | Python/Plotagem_varios_dadados.py | italogfernandes/minicurso-arduino-avancado | bbdfe0f4dedf684a9f480268d161c1c145661242 | [
"MIT"
] | null | null | null | import serial
import numpy
import matplotlib.pyplot as plt
from drawnow import *
# Rolling histories of each sensor channel (accelerometer X/Y/Z,
# temperature, gyroscope X/Y/Z).
ACx= []
ACy= []
ACz = []
Temperatura = []
Girx = []
Giry = []
Girz = []
# The Arduino streams comma-separated samples over USB serial at 9600 baud.
arduinoData = serial.Serial('/dev/ttyACM0', 9600, timeout=1)
plt.ion() # tell matplotlib we want to plot live (interactive mode)
cnt=0
def makeFig():  # drawing callback handed to drawnow()
    """Redraw the live plot of the latest sensor samples.

    Plots the accelerometer X/Y/Z histories and the temperature history
    from the module-level lists; called repeatedly by drawnow() from the
    serial read loop.
    """
    plt.ylim(-25000,25000)
    plt.title('Dados do sensor')
    plt.grid(True)
    plt.ylabel('Lido')
    plt.plot(ACx, 'bo-', label='X')
    plt.legend(loc='upper left')
    plt2=plt.twinx()  # second y axis for the Y channel
    plt2.plot(ACy, 'go-', label='Y')
    plt2.ticklabel_format(useOffset=False)
    plt2.legend(loc='upper right')
    plt3=plt.twinx()  # third axis for the Z channel
    plt3.plot(ACz, 'yo-', label='Z')
    plt3.ticklabel_format(useOffset=False)
    plt3.legend(loc='lower left')
    plt4=plt.twinx()
    # BUG FIX: the temperature trace previously re-plotted ACz; plot the
    # Temperatura history instead.
    plt4.plot(Temperatura, 'rx-', label='T')
    plt4.ticklabel_format(useOffset=False)
    plt4.legend(loc='center right')
while True: # read samples forever
    while (arduinoData.inWaiting()==0): # busy-wait until data is available
        pass
    arduinoString = arduinoData.readline()
    # NOTE(review): on Python 3 readline() returns bytes, so split(',') would
    # need a decode first -- confirm the target interpreter.
    dataArray = arduinoString.split(',')
    if len(dataArray)>6:
        ACx.append(float( dataArray[0])) # parse the seven fields and append to the histories
        ACy.append(float( dataArray[1]))
        ACz.append(float( dataArray[2]))
        Temperatura.append(float( dataArray[3]))
        Girx.append(float( dataArray[4]))
        Giry.append(float( dataArray[5]))
        Girz.append(float( dataArray[6]))
        print(dataArray)
        drawnow(makeFig)
        #plt.pause(0.0000001)
        cnt=cnt+1 # number of samples currently plotted
        if(cnt>100): # keep only the most recent 100 samples: drop the oldest
            ACx.pop(0)
            ACy.pop(0)
            ACz.pop(0)
            Temperatura.pop(0)
            Girx.pop(0)
            Giry.pop(0)
            Girz.pop(0)
| 28.028169 | 86 | 0.58593 | import serial
import numpy
import matplotlib.pyplot as plt
from drawnow import *
# Rolling histories of each sensor channel (accelerometer X/Y/Z,
# temperature, gyroscope X/Y/Z).
ACx= []
ACy= []
ACz = []
Temperatura = []
Girx = []
Giry = []
Girz = []
# The Arduino streams comma-separated samples over USB serial at 9600 baud.
arduinoData = serial.Serial('/dev/ttyACM0', 9600, timeout=1)
# Interactive mode so the figure can be redrawn live.
plt.ion()
cnt=0
def makeFig():
    """Redraw the live plot of the latest sensor samples.

    Plots the accelerometer X/Y/Z histories and the temperature history
    from the module-level lists; called repeatedly by drawnow() from the
    serial read loop.
    """
    plt.ylim(-25000,25000)
    plt.title('Dados do sensor')
    plt.grid(True)
    plt.ylabel('Lido')
    plt.plot(ACx, 'bo-', label='X')
    plt.legend(loc='upper left')
    plt2=plt.twinx()  # second y axis for the Y channel
    plt2.plot(ACy, 'go-', label='Y')
    plt2.ticklabel_format(useOffset=False)
    plt2.legend(loc='upper right')
    plt3=plt.twinx()  # third axis for the Z channel
    plt3.plot(ACz, 'yo-', label='Z')
    plt3.ticklabel_format(useOffset=False)
    plt3.legend(loc='lower left')
    plt4=plt.twinx()
    # BUG FIX: the temperature trace previously re-plotted ACz; plot the
    # Temperatura history instead.
    plt4.plot(Temperatura, 'rx-', label='T')
    plt4.ticklabel_format(useOffset=False)
    plt4.legend(loc='center right')
while True:
    # Busy-wait until the Arduino has sent at least one byte.
    while (arduinoData.inWaiting()==0):
        pass
    arduinoString = arduinoData.readline()
    # NOTE(review): on Python 3 readline() returns bytes, so split(',') would
    # need a decode first -- confirm the target interpreter.
    dataArray = arduinoString.split(',')
    if len(dataArray)>6:
        # Parse the seven fields and append them to the rolling histories.
        ACx.append(float( dataArray[0]))
        ACy.append(float( dataArray[1]))
        ACz.append(float( dataArray[2]))
        Temperatura.append(float( dataArray[3]))
        Girx.append(float( dataArray[4]))
        Giry.append(float( dataArray[5]))
        Girz.append(float( dataArray[6]))
        print(dataArray)
        drawnow(makeFig)
        cnt=cnt+1  # number of samples currently plotted
        if(cnt>100):  # keep only the most recent 100 samples
            ACx.pop(0)
            ACy.pop(0)
            ACz.pop(0)
            Temperatura.pop(0)
            Girx.pop(0)
            Giry.pop(0)
            Girz.pop(0)
| false | true |
f71ed54fb01d8a712469fee4d6600f643a833730 | 8,129 | py | Python | tests/unit/modules/test_modjk.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | [
"Apache-2.0"
] | 5 | 2018-05-01T20:51:14.000Z | 2021-11-09T05:43:00.000Z | tests/unit/modules/test_modjk.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | [
"Apache-2.0"
] | 86 | 2017-01-27T11:54:46.000Z | 2020-05-20T06:25:26.000Z | tests/unit/modules/test_modjk.py | byteskeptical/salt | 637fe0b04f38b2274191b005d73b3c6707d7f400 | [
"Apache-2.0"
] | 11 | 2017-01-26T19:36:29.000Z | 2021-12-11T07:54:16.000Z | # -*- coding: utf-8 -*-
'''
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing Libs
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.modules.modjk as modjk
@skipIf(NO_MOCK, NO_MOCK_REASON)
class ModjkTestCase(TestCase):
    '''
    Test cases for salt.modules.modjk
    '''

    # Canned response for every command that simply reports success.
    _OK_RESPONSE = {'worker.result.type': 'OK'}

    @staticmethod
    def _http(return_value):
        '''
        Return a patcher that makes modjk._do_http yield the given canned
        response instead of talking to a real mod_jk status worker.
        Factored out: every test previously repeated this patch verbatim.
        '''
        return patch.object(modjk, '_do_http', return_value=return_value)

    def test_version(self):
        '''
        Test for return the modjk version
        '''
        with self._http({'worker.jk_version': 'mod_jk/1.2.37'}):
            self.assertEqual(modjk.version(), '1.2.37')

    def test_get_running(self):
        '''
        Test for get the current running config (not from disk)
        '''
        with self._http({}):
            self.assertDictEqual(modjk.get_running(), {})

    def test_dump_config(self):
        '''
        Test for dump the original configuration that was loaded from disk
        '''
        with self._http({}):
            self.assertDictEqual(modjk.dump_config(), {})

    def test_list_configured_members(self):
        '''
        Test for return a list of member workers from the configuration files
        '''
        with self._http({}):
            self.assertListEqual(modjk.list_configured_members('loadbalancer1'), [])
        with self._http({'worker.loadbalancer1.balance_workers': 'SALT'}):
            self.assertListEqual(modjk.list_configured_members('loadbalancer1'),
                                 ['SALT'])

    def test_workers(self):
        '''
        Test for return a list of member workers and their status
        '''
        with self._http({'worker.list': 'Salt1,Salt2'}):
            self.assertDictEqual(modjk.workers(), {})

    def test_recover_all(self):
        '''
        Test for set the all the workers in lbn to recover and
        activate them if they are not
        '''
        with self._http({}):
            self.assertDictEqual(modjk.recover_all('loadbalancer1'), {})
        with self._http({'worker.loadbalancer1.balance_workers': 'SALT'}):
            with patch.object(modjk, 'worker_status',
                              return_value={'activation': 'ACT', 'state': 'OK'}):
                self.assertDictEqual(modjk.recover_all('loadbalancer1'),
                                     {'SALT': {'activation': 'ACT',
                                               'state': 'OK'}})

    def test_reset_stats(self):
        '''
        Test for reset all runtime statistics for the load balancer
        '''
        with self._http(self._OK_RESPONSE):
            self.assertTrue(modjk.reset_stats('loadbalancer1'))

    def test_lb_edit(self):
        '''
        Test for edit the loadbalancer settings
        '''
        with self._http(self._OK_RESPONSE):
            self.assertTrue(modjk.lb_edit('loadbalancer1', {'vlr': 1, 'vlt': 60}))

    def test_bulk_stop(self):
        '''
        Test for stop all the given workers in the specific load balancer
        '''
        with self._http(self._OK_RESPONSE):
            self.assertTrue(modjk.bulk_stop(['node1', 'node2', 'node3'],
                                            'loadbalancer1'))

    def test_bulk_activate(self):
        '''
        Test for activate all the given workers in the specific load balancer
        '''
        with self._http(self._OK_RESPONSE):
            self.assertTrue(modjk.bulk_activate(['node1', 'node2', 'node3'],
                                                'loadbalancer1'))

    def test_bulk_disable(self):
        '''
        Test for disable all the given workers in the specific load balancer
        '''
        with self._http(self._OK_RESPONSE):
            self.assertTrue(modjk.bulk_disable(['node1', 'node2', 'node3'],
                                               'loadbalancer1'))

    def test_bulk_recover(self):
        '''
        Test for recover all the given workers in the specific load balancer
        '''
        with self._http(self._OK_RESPONSE):
            self.assertTrue(modjk.bulk_recover(['node1', 'node2', 'node3'],
                                               'loadbalancer1'))

    def test_worker_status(self):
        '''
        Test for return the state of the worker
        '''
        with self._http({'worker.node1.activation': 'ACT',
                         'worker.node1.state': 'OK'}):
            self.assertDictEqual(modjk.worker_status('node1'),
                                 {'activation': 'ACT', 'state': 'OK'})
        with self._http({}):
            self.assertFalse(modjk.worker_status('node1'))

    def test_worker_recover(self):
        '''
        Test for set the worker to recover this module will fail
        if it is in OK state
        '''
        with self._http({}):
            self.assertDictEqual(modjk.worker_recover('node1', 'loadbalancer1'), {})

    def test_worker_disable(self):
        '''
        Test for set the worker to disable state in the lbn load balancer
        '''
        with self._http(self._OK_RESPONSE):
            self.assertTrue(modjk.worker_disable('node1', 'loadbalancer1'))

    def test_worker_activate(self):
        '''
        Test for set the worker to activate state in the lbn load balancer
        '''
        with self._http(self._OK_RESPONSE):
            self.assertTrue(modjk.worker_activate('node1', 'loadbalancer1'))

    def test_worker_stop(self):
        '''
        Test for set the worker to stopped state in the lbn load balancer
        '''
        with self._http(self._OK_RESPONSE):
            self.assertTrue(modjk.worker_stop('node1', 'loadbalancer1'))

    def test_worker_edit(self):
        '''
        Test for edit the worker settings
        '''
        with self._http(self._OK_RESPONSE):
            self.assertTrue(modjk.worker_edit('node1', 'loadbalancer1',
                                              {'vwf': 500, 'vwd': 60}))
| 35.497817 | 80 | 0.549637 |
from __future__ import absolute_import, print_function, unicode_literals
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
patch,
NO_MOCK,
NO_MOCK_REASON
)
import salt.modules.modjk as modjk
@skipIf(NO_MOCK, NO_MOCK_REASON)
class ModjkTestCase(TestCase):
    '''
    Test cases for salt.modules.modjk
    '''

    # Canned response for every command that simply reports success.
    _OK_RESPONSE = {'worker.result.type': 'OK'}

    @staticmethod
    def _http(return_value):
        '''
        Return a patcher that makes modjk._do_http yield the given canned
        response instead of talking to a real mod_jk status worker.
        Factored out: every test previously repeated this patch verbatim.
        '''
        return patch.object(modjk, '_do_http', return_value=return_value)

    def test_version(self):
        '''
        Test for return the modjk version
        '''
        with self._http({'worker.jk_version': 'mod_jk/1.2.37'}):
            self.assertEqual(modjk.version(), '1.2.37')

    def test_get_running(self):
        '''
        Test for get the current running config (not from disk)
        '''
        with self._http({}):
            self.assertDictEqual(modjk.get_running(), {})

    def test_dump_config(self):
        '''
        Test for dump the original configuration that was loaded from disk
        '''
        with self._http({}):
            self.assertDictEqual(modjk.dump_config(), {})

    def test_list_configured_members(self):
        '''
        Test for return a list of member workers from the configuration files
        '''
        with self._http({}):
            self.assertListEqual(modjk.list_configured_members('loadbalancer1'), [])
        with self._http({'worker.loadbalancer1.balance_workers': 'SALT'}):
            self.assertListEqual(modjk.list_configured_members('loadbalancer1'),
                                 ['SALT'])

    def test_workers(self):
        '''
        Test for return a list of member workers and their status
        '''
        with self._http({'worker.list': 'Salt1,Salt2'}):
            self.assertDictEqual(modjk.workers(), {})

    def test_recover_all(self):
        '''
        Test for set the all the workers in lbn to recover and
        activate them if they are not
        '''
        with self._http({}):
            self.assertDictEqual(modjk.recover_all('loadbalancer1'), {})
        with self._http({'worker.loadbalancer1.balance_workers': 'SALT'}):
            with patch.object(modjk, 'worker_status',
                              return_value={'activation': 'ACT', 'state': 'OK'}):
                self.assertDictEqual(modjk.recover_all('loadbalancer1'),
                                     {'SALT': {'activation': 'ACT',
                                               'state': 'OK'}})

    def test_reset_stats(self):
        '''
        Test for reset all runtime statistics for the load balancer
        '''
        with self._http(self._OK_RESPONSE):
            self.assertTrue(modjk.reset_stats('loadbalancer1'))

    def test_lb_edit(self):
        '''
        Test for edit the loadbalancer settings
        '''
        with self._http(self._OK_RESPONSE):
            self.assertTrue(modjk.lb_edit('loadbalancer1', {'vlr': 1, 'vlt': 60}))

    def test_bulk_stop(self):
        '''
        Test for stop all the given workers in the specific load balancer
        '''
        with self._http(self._OK_RESPONSE):
            self.assertTrue(modjk.bulk_stop(['node1', 'node2', 'node3'],
                                            'loadbalancer1'))

    def test_bulk_activate(self):
        '''
        Test for activate all the given workers in the specific load balancer
        '''
        with self._http(self._OK_RESPONSE):
            self.assertTrue(modjk.bulk_activate(['node1', 'node2', 'node3'],
                                                'loadbalancer1'))

    def test_bulk_disable(self):
        '''
        Test for disable all the given workers in the specific load balancer
        '''
        with self._http(self._OK_RESPONSE):
            self.assertTrue(modjk.bulk_disable(['node1', 'node2', 'node3'],
                                               'loadbalancer1'))

    def test_bulk_recover(self):
        '''
        Test for recover all the given workers in the specific load balancer
        '''
        with self._http(self._OK_RESPONSE):
            self.assertTrue(modjk.bulk_recover(['node1', 'node2', 'node3'],
                                               'loadbalancer1'))

    def test_worker_status(self):
        '''
        Test for return the state of the worker
        '''
        with self._http({'worker.node1.activation': 'ACT',
                         'worker.node1.state': 'OK'}):
            self.assertDictEqual(modjk.worker_status('node1'),
                                 {'activation': 'ACT', 'state': 'OK'})
        with self._http({}):
            self.assertFalse(modjk.worker_status('node1'))

    def test_worker_recover(self):
        '''
        Test for set the worker to recover this module will fail
        if it is in OK state
        '''
        with self._http({}):
            self.assertDictEqual(modjk.worker_recover('node1', 'loadbalancer1'), {})

    def test_worker_disable(self):
        '''
        Test for set the worker to disable state in the lbn load balancer
        '''
        with self._http(self._OK_RESPONSE):
            self.assertTrue(modjk.worker_disable('node1', 'loadbalancer1'))

    def test_worker_activate(self):
        '''
        Test for set the worker to activate state in the lbn load balancer
        '''
        with self._http(self._OK_RESPONSE):
            self.assertTrue(modjk.worker_activate('node1', 'loadbalancer1'))

    def test_worker_stop(self):
        '''
        Test for set the worker to stopped state in the lbn load balancer
        '''
        with self._http(self._OK_RESPONSE):
            self.assertTrue(modjk.worker_stop('node1', 'loadbalancer1'))

    def test_worker_edit(self):
        '''
        Test for edit the worker settings
        '''
        with self._http(self._OK_RESPONSE):
            self.assertTrue(modjk.worker_edit('node1', 'loadbalancer1',
                                              {'vwf': 500, 'vwd': 60}))
| true | true |
f71ed55e2fa3d1075c8a3ae94a4d68e6353fb1e3 | 15,527 | py | Python | eval.py | mendelmaker/yolact | 83e7d08f03951c49a9731759e8458c51fe0922d7 | [
"MIT"
] | null | null | null | eval.py | mendelmaker/yolact | 83e7d08f03951c49a9731759e8458c51fe0922d7 | [
"MIT"
] | null | null | null | eval.py | mendelmaker/yolact | 83e7d08f03951c49a9731759e8458c51fe0922d7 | [
"MIT"
] | null | null | null | import json
import numpy as np
import torch
import pycocotools
import argparse
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from terminaltables import AsciiTable
from collections import OrderedDict
import torch.backends.cudnn as cudnn
from data.coco import COCODetection
from modules.build_yolact import Yolact
from utils.augmentations import BaseTransform
from utils.functions import MovingAverage, ProgressBar
from utils.box_utils import bbox_iou, mask_iou
from utils import timer
from utils.output_utils import after_nms, NMS
from data.config import cfg, update_config, COCO_LABEL_MAP
# Command-line interface for COCO-style evaluation of a trained YOLACT model.
parser = argparse.ArgumentParser(description='YOLACT COCO Evaluation')
parser.add_argument('--trained_model', default='yolact_base_54_800000.pth', type=str)
parser.add_argument('--visual_top_k', default=5, type=int, help='Further restrict the number of predictions to parse')
parser.add_argument('--traditional_nms', default=False, action='store_true', help='Whether to use traditional nms.')
parser.add_argument('--max_num', default=-1, type=int, help='The maximum number of images for test, set to -1 for all.')
parser.add_argument('--cocoapi', action='store_true', help='Whether to use cocoapi to evaluate results.')
class Make_json:
    """Accumulates per-image detections and writes COCO-format result json."""

    def __init__(self):
        self.bbox_data = []
        self.mask_data = []
        # Map contiguous 0-based training class ids back to COCO category ids.
        self.coco_cats = {real_id - 1: coco_id
                          for coco_id, real_id in COCO_LABEL_MAP.items()}

    def add_bbox(self, image_id: int, category_id: int, bbox: list, score: float):
        """ Note that bbox should be a list or tuple of (x1, y1, x2, y2) """
        x1, y1, x2, y2 = bbox
        # COCO expects (x, y, w, h); round to the nearest tenth to avoid huge
        # result files, as COCO suggests.
        box = [round(float(v) * 10) / 10 for v in (x1, y1, x2 - x1, y2 - y1)]
        self.bbox_data.append({'image_id': int(image_id),
                               'category_id': self.coco_cats[int(category_id)],
                               'bbox': box,
                               'score': float(score)})

    def add_mask(self, image_id: int, category_id: int, segmentation: np.ndarray, score: float):
        """ The segmentation should be the full mask, the size of the image and with size [h, w]. """
        rle = pycocotools.mask.encode(np.asfortranarray(segmentation.astype(np.uint8)))
        rle['counts'] = rle['counts'].decode('ascii')  # json.dump doesn't like bytes strings
        self.mask_data.append({'image_id': int(image_id),
                               'category_id': self.coco_cats[int(category_id)],
                               'segmentation': rle,
                               'score': float(score)})

    def dump(self):
        """Write the accumulated detections to the results/ directory."""
        targets = ((self.bbox_data, 'results/bbox_detections.json'),
                   (self.mask_data, 'results/mask_detections.json'))
        for data, path in targets:
            with open(path, 'w') as f:
                json.dump(data, f)
class APDataObject:
    """Stores all the information necessary to calculate the AP for one IoU and one class."""

    def __init__(self):
        self.data_points = []
        self.num_gt_positives = 0

    def push(self, score: float, is_true: bool):
        """Record one detection as (confidence, whether it was a true positive)."""
        self.data_points.append((score, is_true))

    def add_gt_positives(self, num_positives: int):
        """ Call this once per image. """
        self.num_gt_positives += num_positives

    def is_empty(self) -> bool:
        """True when no detections and no ground truth have been recorded."""
        return not self.data_points and self.num_gt_positives == 0

    def get_ap(self) -> float:
        """ Warning: result not cached. """
        if self.num_gt_positives == 0:
            return 0
        # Highest-confidence detections first.
        self.data_points.sort(key=lambda pt: -pt[0])
        precisions = []
        recalls = []
        true_hits = 0
        false_hits = 0
        # Build the precision-recall curve one detection at a time.
        for _, is_true in self.data_points:
            if is_true:
                true_hits += 1
            else:
                false_hits += 1
            precisions.append(true_hits / (true_hits + false_hits))
            recalls.append(true_hits / self.num_gt_positives)
        # Make precision monotonically non-increasing from right to left
        # (removes temporary dips, the same smoothing COCOEval applies).
        for idx in range(len(precisions) - 1, 0, -1):
            if precisions[idx] > precisions[idx - 1]:
                precisions[idx - 1] = precisions[idx]
        # Fixed-length Riemann sum of precision(recall) over 101 evenly
        # spaced recall samples in [0, 1], mirroring COCOEval.
        bar_heights = [0] * 101
        sample_points = np.array([i / 100 for i in range(101)])
        recall_arr = np.array(recalls)
        # For each recall sample, look up the nearest recorded precision
        # from the left; samples past the last recall keep height 0.
        positions = np.searchsorted(recall_arr, sample_points, side='left')
        for bar, pos in enumerate(positions):
            if pos < len(precisions):
                bar_heights[bar] = precisions[pos]
        return sum(bar_heights) / len(bar_heights)
def prep_metrics(ap_data, nms_outs, gt, gt_masks, h, w, num_crowd, image_id, make_json, cocoapi):
    """ Returns a list of APs for this image, with each element being for a class """
    # NOTE(review): relies on module-level globals `cuda` and `iou_thresholds`
    # being set up elsewhere in this file.
    with timer.env('After NMS'):
        pred_classes, pred_confs, pred_boxes, pred_masks = after_nms(nms_outs, h, w)
        if pred_classes.size(0) == 0:
            return
        pred_classes = list(pred_classes.cpu().numpy().astype(int))
        pred_confs = list(pred_confs.cpu().numpy().astype(float))
        # Flatten each mask to a row vector so mask IoU becomes a matrix op.
        pred_masks = pred_masks.view(-1, h * w).cuda() if cuda else pred_masks.view(-1, h * w)
        pred_boxes = pred_boxes.cuda() if cuda else pred_boxes
    if cocoapi:
        # COCO-API path: dump raw detections to json and skip local AP bookkeeping.
        with timer.env('Output json'):
            pred_boxes = pred_boxes.cpu().numpy()
            pred_masks = pred_masks.view(-1, h, w).cpu().numpy()
            for i in range(pred_masks.shape[0]):
                # Make sure that the bounding box actually makes sense and a mask was produced
                if (pred_boxes[i, 3] - pred_boxes[i, 1]) * (pred_boxes[i, 2] - pred_boxes[i, 0]) > 0:
                    make_json.add_bbox(image_id, pred_classes[i], pred_boxes[i, :], pred_confs[i])
                    make_json.add_mask(image_id, pred_classes[i], pred_masks[i, :, :], pred_confs[i])
            return
    with timer.env('Prepare gt'):
        # gt boxes arrive normalized to [0, 1]; scale them to pixel coordinates.
        gt_boxes = torch.Tensor(gt[:, :4])
        gt_boxes[:, [0, 2]] *= w
        gt_boxes[:, [1, 3]] *= h
        gt_classes = list(gt[:, 4].astype(int))
        gt_masks = torch.Tensor(gt_masks).view(-1, h * w)
        if num_crowd > 0:
            # Crowd annotations are stored at the tail of the gt arrays.
            split = lambda x: (x[-num_crowd:], x[:-num_crowd])
            crowd_boxes, gt_boxes = split(gt_boxes)
            crowd_masks, gt_masks = split(gt_masks)
            crowd_classes, gt_classes = split(gt_classes)
    with timer.env('Eval Setup'):
        # Pairwise IoU between every prediction and every gt, for both metrics.
        mask_iou_cache = mask_iou(pred_masks, gt_masks)
        bbox_iou_cache = bbox_iou(pred_boxes.float(), gt_boxes.float())
        if num_crowd > 0:
            crowd_mask_iou_cache = mask_iou(pred_masks, crowd_masks, iscrowd=True)
            crowd_bbox_iou_cache = bbox_iou(pred_boxes.float(), crowd_boxes.float(), iscrowd=True)
        else:
            crowd_mask_iou_cache = None
            crowd_bbox_iou_cache = None
        iou_types = [('box', lambda i, j: bbox_iou_cache[i, j].item(), lambda i, j: crowd_bbox_iou_cache[i, j].item()),
                     ('mask', lambda i, j: mask_iou_cache[i, j].item(), lambda i, j: crowd_mask_iou_cache[i, j].item())]
    timer.start('Main loop')
    # Greedy matching per (class, IoU threshold, metric type), as in COCOEval.
    for _class in set(pred_classes + gt_classes):
        num_gt_per_class = gt_classes.count(_class)
        for iouIdx in range(len(iou_thresholds)):
            iou_threshold = iou_thresholds[iouIdx]
            for iou_type, iou_func, crowd_func in iou_types:
                gt_used = [False] * len(gt_classes)
                ap_obj = ap_data[iou_type][iouIdx][_class]
                ap_obj.add_gt_positives(num_gt_per_class)
                for i, pred_class in enumerate(pred_classes):
                    if pred_class != _class:
                        continue
                    # Pick the unused gt of this class with the highest IoU
                    # above the threshold.
                    max_iou_found = iou_threshold
                    max_match_idx = -1
                    for j, gt_class in enumerate(gt_classes):
                        if gt_used[j] or gt_class != _class:
                            continue
                        iou = iou_func(i, j)
                        if iou > max_iou_found:
                            max_iou_found = iou
                            max_match_idx = j
                    if max_match_idx >= 0:
                        gt_used[max_match_idx] = True
                        ap_obj.push(pred_confs[i], True)
                    else:
                        # If the detection matches a crowd, we can just ignore it
                        matched_crowd = False
                        if num_crowd > 0:
                            for j in range(len(crowd_classes)):
                                if crowd_classes[j] != _class:
                                    continue
                                iou = crowd_func(i, j)
                                if iou > iou_threshold:
                                    matched_crowd = True
                                    break
                        # All this crowd code so that we can make sure that our eval code gives the
                        # same result as COCOEval. There aren't even that many crowd annotations to
                        # begin with, but accuracy is of the utmost importance.
                        if not matched_crowd:
                            ap_obj.push(pred_confs[i], False)
    timer.stop('Main loop')
def calc_map(ap_data):
    """Aggregate per-class AP objects into box/mask mAP and an ASCII table.

    Returns (table_string, box_row, mask_row) where each row holds the
    label followed by the rounded 'all' mAP and one value per IoU threshold.
    """
    print('\nCalculating mAP...')
    # aps[iou_idx][iou_type] collects the per-class APs at that threshold.
    aps = [{'box': [], 'mask': []} for _ in iou_thresholds]
    for class_idx in range(len(cfg.dataset.class_names)):
        for iou_idx in range(len(iou_thresholds)):
            for iou_type in ('box', 'mask'):
                ap_obj = ap_data[iou_type][iou_idx][class_idx]
                if not ap_obj.is_empty():
                    aps[iou_idx][iou_type].append(ap_obj.get_ap())
    all_maps = {'box': OrderedDict(), 'mask': OrderedDict()}
    for iou_type in ('box', 'mask'):
        all_maps[iou_type]['all'] = 0  # placeholder so 'all' stays first
        for iou_idx, threshold in enumerate(iou_thresholds):
            per_class = aps[iou_idx][iou_type]
            mAP = sum(per_class) / len(per_class) * 100 if per_class else 0
            all_maps[iou_type][int(threshold * 100)] = mAP
        # Average the per-threshold entries (the zero placeholder is excluded
        # from the denominator).
        all_maps[iou_type]['all'] = (sum(all_maps[iou_type].values())
                                     / (len(all_maps[iou_type].values()) - 1))
    header = [' '] + list(all_maps['box'].keys())
    box_row = ['box'] + [round(v, 2) for v in all_maps['box'].values()]
    mask_row = ['mask'] + [round(v, 2) for v in all_maps['mask'].values()]
    table = AsciiTable([header, box_row, mask_row])
    return table.table, box_row, mask_row
def evaluate(net, dataset, max_num=-1, during_training=False, cocoapi=False, traditional_nms=False):
frame_times = MovingAverage()
dataset_size = len(dataset) if max_num < 0 else min(max_num, len(dataset))
dataset_indices = list(range(len(dataset)))
dataset_indices = dataset_indices[:dataset_size]
progress_bar = ProgressBar(40, dataset_size)
# For each class and iou, stores tuples (score, isPositive)
# Index ap_data[type][iouIdx][classIdx]
ap_data = {'box': [[APDataObject() for _ in cfg.dataset.class_names] for _ in iou_thresholds],
'mask': [[APDataObject() for _ in cfg.dataset.class_names] for _ in iou_thresholds]}
make_json = Make_json()
for i, image_idx in enumerate(dataset_indices):
timer.reset()
with timer.env('Data loading'):
img, gt, gt_masks, h, w, num_crowd = dataset.pull_item(image_idx)
batch = img.unsqueeze(0)
if cuda:
batch = batch.cuda()
with timer.env('Network forward'):
net_outs = net(batch)
nms_outs = NMS(net_outs, traditional_nms)
prep_metrics(ap_data, nms_outs, gt, gt_masks, h, w, num_crowd, dataset.ids[image_idx], make_json, cocoapi)
# First couple of images take longer because we're constructing the graph.
# Since that's technically initialization, don't include those in the FPS calculations.
fps = 0
if i > 1 and not during_training:
frame_times.add(timer.total_time())
fps = 1 / frame_times.get_avg()
progress = (i + 1) / dataset_size * 100
progress_bar.set_val(i + 1)
print('\rProcessing: %s %d / %d (%.2f%%) %.2f fps ' % (
repr(progress_bar), i + 1, dataset_size, progress, fps), end='')
else:
if cocoapi:
make_json.dump()
print(f'\nJson files dumped, saved in: \'results/\', start evaluting.')
gt_annotations = COCO(cfg.dataset.valid_info)
bbox_dets = gt_annotations.loadRes(f'results/bbox_detections.json')
mask_dets = gt_annotations.loadRes(f'results/mask_detections.json')
print('\nEvaluating BBoxes:')
bbox_eval = COCOeval(gt_annotations, bbox_dets, 'bbox')
bbox_eval.evaluate()
bbox_eval.accumulate()
bbox_eval.summarize()
print('\nEvaluating Masks:')
bbox_eval = COCOeval(gt_annotations, mask_dets, 'segm')
bbox_eval.evaluate()
bbox_eval.accumulate()
bbox_eval.summarize()
return
table, box_row, mask_row = calc_map(ap_data)
print(table)
return table, box_row, mask_row
iou_thresholds = [x / 100 for x in range(50, 100, 5)]
cuda = torch.cuda.is_available()
if __name__ == '__main__':
args = parser.parse_args()
strs = args.trained_model.split('_')
config = f'{strs[-3]}_{strs[-2]}_config'
update_config(config)
print(f'\nUsing \'{config}\' according to the trained_model.\n')
with torch.no_grad():
if cuda:
cudnn.benchmark = True
cudnn.fastest = True
torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
torch.set_default_tensor_type('torch.FloatTensor')
dataset = COCODetection(cfg.dataset.valid_images, cfg.dataset.valid_info, augmentation=BaseTransform())
net = Yolact()
net.load_weights('weights/' + args.trained_model, cuda)
net.eval()
print('\nModel loaded.\n')
if cuda:
net = net.cuda()
evaluate(net, dataset, args.max_num, False, args.cocoapi, args.traditional_nms)
| 40.860526 | 129 | 0.597153 | import json
import numpy as np
import torch
import pycocotools
import argparse
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from terminaltables import AsciiTable
from collections import OrderedDict
import torch.backends.cudnn as cudnn
from data.coco import COCODetection
from modules.build_yolact import Yolact
from utils.augmentations import BaseTransform
from utils.functions import MovingAverage, ProgressBar
from utils.box_utils import bbox_iou, mask_iou
from utils import timer
from utils.output_utils import after_nms, NMS
from data.config import cfg, update_config, COCO_LABEL_MAP
parser = argparse.ArgumentParser(description='YOLACT COCO Evaluation')
parser.add_argument('--trained_model', default='yolact_base_54_800000.pth', type=str)
parser.add_argument('--visual_top_k', default=5, type=int, help='Further restrict the number of predictions to parse')
parser.add_argument('--traditional_nms', default=False, action='store_true', help='Whether to use traditional nms.')
parser.add_argument('--max_num', default=-1, type=int, help='The maximum number of images for test, set to -1 for all.')
parser.add_argument('--cocoapi', action='store_true', help='Whether to use cocoapi to evaluate results.')
class Make_json:
def __init__(self):
self.bbox_data = []
self.mask_data = []
self.coco_cats = {}
for coco_id, real_id in COCO_LABEL_MAP.items():
class_id = real_id - 1
self.coco_cats[class_id] = coco_id
def add_bbox(self, image_id: int, category_id: int, bbox: list, score: float):
bbox = [bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]]
bbox = [round(float(x) * 10) / 10 for x in bbox]
self.bbox_data.append({'image_id': int(image_id),
'category_id': self.coco_cats[int(category_id)],
'bbox': bbox,
'score': float(score)})
def add_mask(self, image_id: int, category_id: int, segmentation: np.ndarray, score: float):
rle = pycocotools.mask.encode(np.asfortranarray(segmentation.astype(np.uint8)))
rle['counts'] = rle['counts'].decode('ascii')
self.mask_data.append({'image_id': int(image_id),
'category_id': self.coco_cats[int(category_id)],
'segmentation': rle,
'score': float(score)})
def dump(self):
dump_arguments = [(self.bbox_data, f'results/bbox_detections.json'),
(self.mask_data, f'results/mask_detections.json')]
for data, path in dump_arguments:
with open(path, 'w') as f:
json.dump(data, f)
class APDataObject:
def __init__(self):
self.data_points = []
self.num_gt_positives = 0
def push(self, score: float, is_true: bool):
self.data_points.append((score, is_true))
def add_gt_positives(self, num_positives: int):
self.num_gt_positives += num_positives
def is_empty(self) -> bool:
return len(self.data_points) == 0 and self.num_gt_positives == 0
def get_ap(self) -> float:
if self.num_gt_positives == 0:
return 0
# Sort descending by score
self.data_points.sort(key=lambda x: -x[0])
precisions = []
recalls = []
num_true = 0
num_false = 0
# Compute the precision-recall curve. The x axis is recalls and the y axis precisions.
for datum in self.data_points:
# datum[1] is whether the detection a true or false positive
if datum[1]:
num_true += 1
else:
num_false += 1
precision = num_true / (num_true + num_false)
recall = num_true / self.num_gt_positives
precisions.append(precision)
recalls.append(recall)
# Smooth the curve by computing [max(precisions[i:]) for i in range(len(precisions))]
# Basically, remove any temporary dips from the curve.
# At least that's what I think, idk. COCOEval did it so I do too.
for i in range(len(precisions) - 1, 0, -1):
if precisions[i] > precisions[i - 1]:
precisions[i - 1] = precisions[i]
y_range = [0] * 101
x_range = np.array([x / 100 for x in range(101)])
recalls = np.array(recalls)
indices = np.searchsorted(recalls, x_range, side='left')
for bar_idx, precision_idx in enumerate(indices):
if precision_idx < len(precisions):
y_range[bar_idx] = precisions[precision_idx]
# Finally compute the riemann sum to get our integral.
# avg([precision(x) for x in 0:0.01:1])
return sum(y_range) / len(y_range)
def prep_metrics(ap_data, nms_outs, gt, gt_masks, h, w, num_crowd, image_id, make_json, cocoapi):
with timer.env('After NMS'):
pred_classes, pred_confs, pred_boxes, pred_masks = after_nms(nms_outs, h, w)
if pred_classes.size(0) == 0:
return
pred_classes = list(pred_classes.cpu().numpy().astype(int))
pred_confs = list(pred_confs.cpu().numpy().astype(float))
pred_masks = pred_masks.view(-1, h * w).cuda() if cuda else pred_masks.view(-1, h * w)
pred_boxes = pred_boxes.cuda() if cuda else pred_boxes
if cocoapi:
with timer.env('Output json'):
pred_boxes = pred_boxes.cpu().numpy()
pred_masks = pred_masks.view(-1, h, w).cpu().numpy()
for i in range(pred_masks.shape[0]):
# Make sure that the bounding box actually makes sense and a mask was produced
if (pred_boxes[i, 3] - pred_boxes[i, 1]) * (pred_boxes[i, 2] - pred_boxes[i, 0]) > 0:
make_json.add_bbox(image_id, pred_classes[i], pred_boxes[i, :], pred_confs[i])
make_json.add_mask(image_id, pred_classes[i], pred_masks[i, :, :], pred_confs[i])
return
with timer.env('Prepare gt'):
gt_boxes = torch.Tensor(gt[:, :4])
gt_boxes[:, [0, 2]] *= w
gt_boxes[:, [1, 3]] *= h
gt_classes = list(gt[:, 4].astype(int))
gt_masks = torch.Tensor(gt_masks).view(-1, h * w)
if num_crowd > 0:
split = lambda x: (x[-num_crowd:], x[:-num_crowd])
crowd_boxes, gt_boxes = split(gt_boxes)
crowd_masks, gt_masks = split(gt_masks)
crowd_classes, gt_classes = split(gt_classes)
with timer.env('Eval Setup'):
mask_iou_cache = mask_iou(pred_masks, gt_masks)
bbox_iou_cache = bbox_iou(pred_boxes.float(), gt_boxes.float())
if num_crowd > 0:
crowd_mask_iou_cache = mask_iou(pred_masks, crowd_masks, iscrowd=True)
crowd_bbox_iou_cache = bbox_iou(pred_boxes.float(), crowd_boxes.float(), iscrowd=True)
else:
crowd_mask_iou_cache = None
crowd_bbox_iou_cache = None
iou_types = [('box', lambda i, j: bbox_iou_cache[i, j].item(), lambda i, j: crowd_bbox_iou_cache[i, j].item()),
('mask', lambda i, j: mask_iou_cache[i, j].item(), lambda i, j: crowd_mask_iou_cache[i, j].item())]
timer.start('Main loop')
for _class in set(pred_classes + gt_classes):
num_gt_per_class = gt_classes.count(_class)
for iouIdx in range(len(iou_thresholds)):
iou_threshold = iou_thresholds[iouIdx]
for iou_type, iou_func, crowd_func in iou_types:
gt_used = [False] * len(gt_classes)
ap_obj = ap_data[iou_type][iouIdx][_class]
ap_obj.add_gt_positives(num_gt_per_class)
for i, pred_class in enumerate(pred_classes):
if pred_class != _class:
continue
max_iou_found = iou_threshold
max_match_idx = -1
for j, gt_class in enumerate(gt_classes):
if gt_used[j] or gt_class != _class:
continue
iou = iou_func(i, j)
if iou > max_iou_found:
max_iou_found = iou
max_match_idx = j
if max_match_idx >= 0:
gt_used[max_match_idx] = True
ap_obj.push(pred_confs[i], True)
else:
# If the detection matches a crowd, we can just ignore it
matched_crowd = False
if num_crowd > 0:
for j in range(len(crowd_classes)):
if crowd_classes[j] != _class:
continue
iou = crowd_func(i, j)
if iou > iou_threshold:
matched_crowd = True
break
# All this crowd code so that we can make sure that our eval code gives the
# same result as COCOEval. There aren't even that many crowd annotations to
if not matched_crowd:
ap_obj.push(pred_confs[i], False)
timer.stop('Main loop')
def calc_map(ap_data):
print('\nCalculating mAP...')
aps = [{'box': [], 'mask': []} for _ in iou_thresholds]
for _class in range(len(cfg.dataset.class_names)):
for iou_idx in range(len(iou_thresholds)):
for iou_type in ('box', 'mask'):
ap_obj = ap_data[iou_type][iou_idx][_class]
if not ap_obj.is_empty():
aps[iou_idx][iou_type].append(ap_obj.get_ap())
all_maps = {'box': OrderedDict(), 'mask': OrderedDict()}
for iou_type in ('box', 'mask'):
all_maps[iou_type]['all'] = 0
for i, threshold in enumerate(iou_thresholds):
mAP = sum(aps[i][iou_type]) / len(aps[i][iou_type]) * 100 if len(aps[i][iou_type]) > 0 else 0
all_maps[iou_type][int(threshold * 100)] = mAP
all_maps[iou_type]['all'] = (sum(all_maps[iou_type].values()) / (len(all_maps[iou_type].values()) - 1))
row1 = list(all_maps['box'].keys())
row1.insert(0, ' ')
row2 = list(all_maps['box'].values())
row2 = [round(aa, 2) for aa in row2]
row2.insert(0, 'box')
row3 = list(all_maps['mask'].values())
row3 = [round(aa, 2) for aa in row3]
row3.insert(0, 'mask')
table = [row1, row2, row3]
table = AsciiTable(table)
return table.table, row2, row3
def evaluate(net, dataset, max_num=-1, during_training=False, cocoapi=False, traditional_nms=False):
frame_times = MovingAverage()
dataset_size = len(dataset) if max_num < 0 else min(max_num, len(dataset))
dataset_indices = list(range(len(dataset)))
dataset_indices = dataset_indices[:dataset_size]
progress_bar = ProgressBar(40, dataset_size)
ap_data = {'box': [[APDataObject() for _ in cfg.dataset.class_names] for _ in iou_thresholds],
'mask': [[APDataObject() for _ in cfg.dataset.class_names] for _ in iou_thresholds]}
make_json = Make_json()
for i, image_idx in enumerate(dataset_indices):
timer.reset()
with timer.env('Data loading'):
img, gt, gt_masks, h, w, num_crowd = dataset.pull_item(image_idx)
batch = img.unsqueeze(0)
if cuda:
batch = batch.cuda()
with timer.env('Network forward'):
net_outs = net(batch)
nms_outs = NMS(net_outs, traditional_nms)
prep_metrics(ap_data, nms_outs, gt, gt_masks, h, w, num_crowd, dataset.ids[image_idx], make_json, cocoapi)
# Since that's technically initialization, don't include those in the FPS calculations.
fps = 0
if i > 1 and not during_training:
frame_times.add(timer.total_time())
fps = 1 / frame_times.get_avg()
progress = (i + 1) / dataset_size * 100
progress_bar.set_val(i + 1)
print('\rProcessing: %s %d / %d (%.2f%%) %.2f fps ' % (
repr(progress_bar), i + 1, dataset_size, progress, fps), end='')
else:
if cocoapi:
make_json.dump()
print(f'\nJson files dumped, saved in: \'results/\', start evaluting.')
gt_annotations = COCO(cfg.dataset.valid_info)
bbox_dets = gt_annotations.loadRes(f'results/bbox_detections.json')
mask_dets = gt_annotations.loadRes(f'results/mask_detections.json')
print('\nEvaluating BBoxes:')
bbox_eval = COCOeval(gt_annotations, bbox_dets, 'bbox')
bbox_eval.evaluate()
bbox_eval.accumulate()
bbox_eval.summarize()
print('\nEvaluating Masks:')
bbox_eval = COCOeval(gt_annotations, mask_dets, 'segm')
bbox_eval.evaluate()
bbox_eval.accumulate()
bbox_eval.summarize()
return
table, box_row, mask_row = calc_map(ap_data)
print(table)
return table, box_row, mask_row
iou_thresholds = [x / 100 for x in range(50, 100, 5)]
cuda = torch.cuda.is_available()
if __name__ == '__main__':
args = parser.parse_args()
strs = args.trained_model.split('_')
config = f'{strs[-3]}_{strs[-2]}_config'
update_config(config)
print(f'\nUsing \'{config}\' according to the trained_model.\n')
with torch.no_grad():
if cuda:
cudnn.benchmark = True
cudnn.fastest = True
torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
torch.set_default_tensor_type('torch.FloatTensor')
dataset = COCODetection(cfg.dataset.valid_images, cfg.dataset.valid_info, augmentation=BaseTransform())
net = Yolact()
net.load_weights('weights/' + args.trained_model, cuda)
net.eval()
print('\nModel loaded.\n')
if cuda:
net = net.cuda()
evaluate(net, dataset, args.max_num, False, args.cocoapi, args.traditional_nms)
| true | true |
f71ed5dd900f5b4c9d430cc903b05e70c9ce4cd5 | 554 | py | Python | mini_gplus/daos/post_cache.py | KTachibanaM/pill-city | 8a5a9c6a46c79f2e5c33c598d931360c319b75f4 | [
"MIT"
] | 16 | 2021-08-03T07:03:15.000Z | 2021-09-23T09:09:57.000Z | mini_gplus/daos/post_cache.py | KTachibanaM/pill-city | 8a5a9c6a46c79f2e5c33c598d931360c319b75f4 | [
"MIT"
] | 193 | 2021-07-29T09:43:54.000Z | 2021-10-05T05:18:55.000Z | mini_gplus/daos/post_cache.py | KTachibanaM/mini-gplus | 1881bef983797fe99e1499fb794976cb48863c70 | [
"MIT"
] | 3 | 2021-09-14T10:08:41.000Z | 2021-09-20T12:47:40.000Z | from bson import ObjectId
from mini_gplus.models import Post
from mini_gplus.utils.profiling import timer
from .cache import r
RPost = "post"
def set_in_post_cache(post: Post):
r.hset(RPost, str(post.id), post.to_json())
@timer
def get_in_post_cache(oid: ObjectId):
r_post = r.hget(RPost, str(oid))
if not r_post:
post = Post.objects.get(id=oid)
set_in_post_cache(post)
return post
return Post.from_json(r_post.decode('utf-8'))
def exists_in_post_cache(oid: ObjectId):
return r.hexists(RPost, str(oid))
| 22.16 | 49 | 0.705776 | from bson import ObjectId
from mini_gplus.models import Post
from mini_gplus.utils.profiling import timer
from .cache import r
RPost = "post"
def set_in_post_cache(post: Post):
r.hset(RPost, str(post.id), post.to_json())
@timer
def get_in_post_cache(oid: ObjectId):
r_post = r.hget(RPost, str(oid))
if not r_post:
post = Post.objects.get(id=oid)
set_in_post_cache(post)
return post
return Post.from_json(r_post.decode('utf-8'))
def exists_in_post_cache(oid: ObjectId):
return r.hexists(RPost, str(oid))
| true | true |
f71ed605bc82fb5d11760fc4169a8b8a0d93880d | 23 | py | Python | trimesh/version.py | pramukta/trimesh | 4b01ef0ea2d6f8a8bcc981d87dd3ae097bc62f11 | [
"MIT"
] | null | null | null | trimesh/version.py | pramukta/trimesh | 4b01ef0ea2d6f8a8bcc981d87dd3ae097bc62f11 | [
"MIT"
] | null | null | null | trimesh/version.py | pramukta/trimesh | 4b01ef0ea2d6f8a8bcc981d87dd3ae097bc62f11 | [
"MIT"
] | null | null | null | __version__ = '3.1.11'
| 11.5 | 22 | 0.652174 | __version__ = '3.1.11'
| true | true |
f71edb5720eb737000f91add91b888d1d80e37fe | 2,047 | py | Python | book/conf.py | Superwallah/tip-toi-reveng | 2e7a8e3e70805c7c59e920151aa34da466eeeddb | [
"MIT"
] | null | null | null | book/conf.py | Superwallah/tip-toi-reveng | 2e7a8e3e70805c7c59e920151aa34da466eeeddb | [
"MIT"
] | null | null | null | book/conf.py | Superwallah/tip-toi-reveng | 2e7a8e3e70805c7c59e920151aa34da466eeeddb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
extensions = []
templates_path = ['_templates']
source_suffix = ['.rst', '.md']
master_doc = 'index'
# General information about the project.
project = u'Das tttool-Buch'
copyright = u'2019, Joachim Breitner'
author = u'Joachim Breitner'
version = u'1.8.1'
release = version
language = 'de'
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
pygments_style = 'sphinx'
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
html_theme = 'default'
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
htmlhelp_basename = 'tttool'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'tttool.tex', u'Das tttool-Buch',
u'Joachim Breitner', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tttool', u'Das tttool-Buch',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
texinfo_documents = [
(master_doc, 'tttool', u'Das tttool-Buch',
author, 'tttool', 'Tiptoi zum selberbasteln',
'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
epub_identifier = 'http://tttool.entropia.de'
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
| 24.369048 | 75 | 0.611138 |
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
extensions = []
templates_path = ['_templates']
source_suffix = ['.rst', '.md']
master_doc = 'index'
project = u'Das tttool-Buch'
copyright = u'2019, Joachim Breitner'
author = u'Joachim Breitner'
version = u'1.8.1'
release = version
language = 'de'
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
pygments_style = 'sphinx'
todo_include_todos = False
html_theme = 'default'
html_static_path = ['_static']
htmlhelp_basename = 'tttool'
latex_elements = {
}
latex_documents = [
(master_doc, 'tttool.tex', u'Das tttool-Buch',
u'Joachim Breitner', 'manual'),
]
man_pages = [
(master_doc, 'tttool', u'Das tttool-Buch',
[author], 1)
]
texinfo_documents = [
(master_doc, 'tttool', u'Das tttool-Buch',
author, 'tttool', 'Tiptoi zum selberbasteln',
'Miscellaneous'),
]
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
epub_identifier = 'http://tttool.entropia.de'
epub_exclude_files = ['search.html']
| true | true |
f71edbd83661cc9f7d04a5509ea567a09ae80c46 | 1,163 | py | Python | 693.binary-number-with-alternating-bits.py | Lonitch/hackerRank | 84991b8340e725422bc47eec664532cc84a3447e | [
"MIT"
] | null | null | null | 693.binary-number-with-alternating-bits.py | Lonitch/hackerRank | 84991b8340e725422bc47eec664532cc84a3447e | [
"MIT"
] | null | null | null | 693.binary-number-with-alternating-bits.py | Lonitch/hackerRank | 84991b8340e725422bc47eec664532cc84a3447e | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=693 lang=python3
#
# [693] Binary Number with Alternating Bits
#
# https://leetcode.com/problems/binary-number-with-alternating-bits/description/
#
# algorithms
# Easy (58.47%)
# Likes: 359
# Dislikes: 74
# Total Accepted: 52.4K
# Total Submissions: 89.1K
# Testcase Example: '5'
#
# Given a positive integer, check whether it has alternating bits: namely, if
# two adjacent bits will always have different values.
#
# Example 1:
#
# Input: 5
# Output: True
# Explanation:
# The binary representation of 5 is: 101
#
#
#
# Example 2:
#
# Input: 7
# Output: False
# Explanation:
# The binary representation of 7 is: 111.
#
#
#
# Example 3:
#
# Input: 11
# Output: False
# Explanation:
# The binary representation of 11 is: 1011.
#
#
#
# Example 4:
#
# Input: 10
# Output: True
# Explanation:
# The binary representation of 10 is: 1010.
#
#
#
# @lc code=start
class Solution:
def hasAlternatingBits(self, n: int) -> bool:
b = list(bin(n)[2:])
if len(b)<2:
return True
else:
return b[0]!=b[1] and len(set(b[::2]))==1 and len(set(b[1::2]))==1
# @lc code=end
| 17.358209 | 80 | 0.625107 |
class Solution:
def hasAlternatingBits(self, n: int) -> bool:
b = list(bin(n)[2:])
if len(b)<2:
return True
else:
return b[0]!=b[1] and len(set(b[::2]))==1 and len(set(b[1::2]))==1
| true | true |
f71edc46484d5881b91881dddb4abd954210f9ec | 2,782 | py | Python | setup.py | nealde/Ampere | 75fa9c34940a71ef865eb98b551b4a4a27da96c3 | [
"MIT"
] | 19 | 2019-03-25T09:49:47.000Z | 2022-02-20T07:40:18.000Z | setup.py | nealde/battery | 75fa9c34940a71ef865eb98b551b4a4a27da96c3 | [
"MIT"
] | 6 | 2018-09-28T19:27:25.000Z | 2019-01-07T16:00:09.000Z | setup.py | nealde/battery | 75fa9c34940a71ef865eb98b551b4a4a27da96c3 | [
"MIT"
] | 6 | 2019-01-25T16:50:56.000Z | 2021-01-29T21:36:32.000Z | import setuptools
import pkg_resources
from setuptools import setup, Extension
def is_installed(requirement):
try:
pkg_resources.require(requirement)
except pkg_resources.ResolutionError:
return False
else:
return True
if not is_installed('numpy>=1.11.0'):
print("""
Error: numpy needs to be installed first. You can install it via:
$ pip install numpy
""")
exit(1)
if not is_installed('Cython>=0.29'):
print("""
Error: cython needs to be installed first. You can install it via:
$ pip install cython
""")
exit(1)
import numpy
from Cython.Distutils import build_ext
from Cython.Build import cythonize
with open("README.md", "r") as fh:
long_description = fh.read()
ida_dir = "ampere/models/ida"
ida_files = ['ida.c', 'ida_band.c', 'ida_dense.c', 'ida_direct.c', 'ida_ic.c', 'ida_io.c', 'nvector_serial.c', 'sundials_band.c', 'sundials_dense.c', 'sundials_direct.c', 'sundials_math.c', 'sundials_nvector.c']
ida_requirements1 = [ida_dir + '/' + ida_file for ida_file in ida_files]
ext_modules = [
Extension("ampere.models.P2D.P2D_fd", ["ampere/models/P2D/P2D_fd.pyx", "ampere/models/P2D/P2D_fd.c", *ida_requirements1], include_dirs=[numpy.get_include()]),
Extension("ampere.models.SPM.SPM_fd", ["ampere/models/SPM/SPM_fd.pyx", "ampere/models/SPM/SPM_fd.c", *ida_requirements1], include_dirs=[numpy.get_include()]),
Extension("ampere.models.SPM.SPM_fd_sei", ["ampere/models/SPM/SPM_fd_sei.pyx", "ampere/models/SPM/SPM_fd_sei.c", *ida_requirements1], include_dirs=[numpy.get_include()]),
Extension("ampere.models.SPM.SPM_par", ["ampere/models/SPM/SPM_par.pyx", "ampere/models/SPM/SPM_par.c", *ida_requirements1], include_dirs=[numpy.get_include()]),
]
cmdclass = {'build_ext': build_ext}
print(setuptools.find_packages())
setup(
name="ampere",
version="0.5.4",
author="Neal Dawson-Elli",
author_email="nealde@uw.edu",
description="A Python package for working with battery discharge data and physics-based battery models",
cmdclass=cmdclass,
ext_modules=cythonize(ext_modules, compiler_directives={'language_level' : "3"}),
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/nealde/Ampere",
packages=[*setuptools.find_packages()],
install_requires=['cython', 'matplotlib < 3.4', 'numpy', 'scipy'],
classifiers=[
"Programming Language :: Python :: 3",
'Programming Language :: Cython',
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
'Topic :: Scientific/Engineering :: Mathematics',
],
keywords="battery numerical simulation modeling",
)
| 36.12987 | 211 | 0.686556 | import setuptools
import pkg_resources
from setuptools import setup, Extension
def is_installed(requirement):
try:
pkg_resources.require(requirement)
except pkg_resources.ResolutionError:
return False
else:
return True
if not is_installed('numpy>=1.11.0'):
print("""
Error: numpy needs to be installed first. You can install it via:
$ pip install numpy
""")
exit(1)
if not is_installed('Cython>=0.29'):
print("""
Error: cython needs to be installed first. You can install it via:
$ pip install cython
""")
exit(1)
import numpy
from Cython.Distutils import build_ext
from Cython.Build import cythonize
with open("README.md", "r") as fh:
long_description = fh.read()
ida_dir = "ampere/models/ida"
ida_files = ['ida.c', 'ida_band.c', 'ida_dense.c', 'ida_direct.c', 'ida_ic.c', 'ida_io.c', 'nvector_serial.c', 'sundials_band.c', 'sundials_dense.c', 'sundials_direct.c', 'sundials_math.c', 'sundials_nvector.c']
ida_requirements1 = [ida_dir + '/' + ida_file for ida_file in ida_files]
ext_modules = [
Extension("ampere.models.P2D.P2D_fd", ["ampere/models/P2D/P2D_fd.pyx", "ampere/models/P2D/P2D_fd.c", *ida_requirements1], include_dirs=[numpy.get_include()]),
Extension("ampere.models.SPM.SPM_fd", ["ampere/models/SPM/SPM_fd.pyx", "ampere/models/SPM/SPM_fd.c", *ida_requirements1], include_dirs=[numpy.get_include()]),
Extension("ampere.models.SPM.SPM_fd_sei", ["ampere/models/SPM/SPM_fd_sei.pyx", "ampere/models/SPM/SPM_fd_sei.c", *ida_requirements1], include_dirs=[numpy.get_include()]),
Extension("ampere.models.SPM.SPM_par", ["ampere/models/SPM/SPM_par.pyx", "ampere/models/SPM/SPM_par.c", *ida_requirements1], include_dirs=[numpy.get_include()]),
]
cmdclass = {'build_ext': build_ext}
print(setuptools.find_packages())
setup(
name="ampere",
version="0.5.4",
author="Neal Dawson-Elli",
author_email="nealde@uw.edu",
description="A Python package for working with battery discharge data and physics-based battery models",
cmdclass=cmdclass,
ext_modules=cythonize(ext_modules, compiler_directives={'language_level' : "3"}),
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/nealde/Ampere",
packages=[*setuptools.find_packages()],
install_requires=['cython', 'matplotlib < 3.4', 'numpy', 'scipy'],
classifiers=[
"Programming Language :: Python :: 3",
'Programming Language :: Cython',
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
'Topic :: Scientific/Engineering :: Mathematics',
],
keywords="battery numerical simulation modeling",
)
| true | true |
f71edc5b2c821754fa672bd832207da961a8f04e | 20,410 | py | Python | bot.py | themagicalmammal/wikibot | 9f7b5f588bbf69ad66af50d573efd6d30c5a64d6 | [
"MIT"
] | 8 | 2021-02-18T10:08:59.000Z | 2022-01-23T00:39:17.000Z | bot.py | themagicalmammal/wikibot | 9f7b5f588bbf69ad66af50d573efd6d30c5a64d6 | [
"MIT"
] | 7 | 2021-02-04T07:21:46.000Z | 2021-08-30T08:34:45.000Z | bot.py | themagicalmammal/wikibot | 9f7b5f588bbf69ad66af50d573efd6d30c5a64d6 | [
"MIT"
] | 1 | 2021-05-22T06:15:59.000Z | 2021-05-22T06:15:59.000Z | from os import environ
from firebase_admin import credentials, db, initialize_app
from flask import Flask, request
from telebot import TeleBot, types
from wikipedia import (
WikipediaPage,
geosearch,
page,
random,
search,
set_lang,
suggest,
summary,
)
# --- Firebase connection --------------------------------------------------
# The realtime database stores one entry per Telegram user:
#   { <user_id>: <wikipedia language code> }
cred = credentials.Certificate("firebase.json")  # service-account key file
initialize_app(
    cred, {"databaseURL": "https://yourappname-user-default-rtdb.firebaseio.com/"}
)
ref = db.reference("/")  # root reference of the user->language mapping
# NOTE(review): snapshot taken once at startup; users added later by /start
# are not reflected in `z` until the process restarts.
z = ref.get()
# --- Telegram API ---------------------------------------------------------
TOKEN = ""  # Telegram bot token (keep secret; do not commit a real value)
bot = TeleBot(TOKEN)
# --- Flask app (serves the Telegram webhook) ------------------------------
server = Flask(__name__)
# --- Canned message fragments (HTML parse mode) ---------------------------
error = "Wrong word, use <b>title</b>"
error2 = "Wrong word, use <b>suggest</b>"
word = " for the word..."
# Wikipedia language (wiki subdomain) codes accepted by the "Language"
# command.  Stored as one space-separated literal and split once at import
# time; element order matches the original listing.
lang_list = (
    "aa ab abs ace ady ady-cyrl aeb aeb-arab aeb-latn af ak aln "
    "als alt am ami an ang anp ar arc arn arq ary "
    "arz as ase ast atj av avk awa ay az azb ba "
    "ban ban-bali bar bat-smg bbc bbc-latn bcc bcl be be-tarask be-x-old bg "
    "bgn bh bho bi bjn bm bn bo bpy bqi br brh "
    "bs btm bto bug bxr ca cbk-zam cdo ce ceb ch cho "
    "chr chy ckb co cps cr crh crh-cyrl crh-latn cs csb cu "
    "cv cy da de de-at de-ch de-formal din diq dsb dtp dty "
    "dv dz ee egl el eml en en-ca en-gb eo es es-419 "
    "es-formal et eu ext fa ff fi fit fiu-vro fj fo fr "
    "frc frp frr fur fy ga gag gan gan-hans gan-hant gcr gd "
    "gl glk gn gom gom-deva gom-latn gor got grc gsw gu gv "
    "ha hak haw he hi hif hif-latn hil ho hr hrx hsb "
    "ht hu hu-formal hy hyw hz ia id ie ig ii ik "
    "ike-cans ike-latn ilo inh io is it iu ja jam jbo jut "
    "jv ka kaa kab kbd kbd-cyrl kbp kg khw ki kiu kj "
    "kjp kk kk-arab kk-cn kk-cyrl kk-kz kk-latn kk-tr kl km kn ko "
    "ko-kp koi kr krc kri krj krl ks ks-arab ks-deva ksh ku "
    "ku-arab ku-latn kum kv kw ky la lad lb lbe lez lfn "
    "lg li lij liv lki lld lmo ln lo loz lrc lt "
    "ltg lus luz lv lzh lzz mad mai map-bms mdf mg mh "
    "mhr mi min mk ml mn mni mnw mo mr mrh mrj "
    "ms mt mus mwl my myv mzn na nah nan nap nb "
    "nds nds-nl ne new ng nia niu nl nl-informal nn no nov "
    "nqo nrm nso nv ny nys oc olo om or os pa "
    "pag pam pap pcd pdc pdt pfl pi pih pl pms pnb "
    "pnt prg ps pt pt-br qu qug rgn rif rm rmy rn "
    "ro roa-rup roa-tara ru rue rup ruq ruq-cyrl ruq-latn rw sa sah "
    "sat sc scn sco sd sdc sdh se sei ses sg sgs "
    "sh shi shi-latn shi-tfng shn shy-latn si simple sk skr skr-arab sl "
    "sli sm sma smn sn so sq sr sr-ec sr-el srn ss "
    "st stq sty su sv sw szl szy ta tay tcy te "
    "tet tg tg-cyrl tg-latn th ti tk tl tly tn to tpi "
    "tr tru trv ts tt tt-cyrl tt-latn tum tw ty tyv tzm "
    "udm ug ug-arab ug-latn uk ur uz uz-cyrl uz-latn ve vec vep "
    "vi vls vmf vo vot vro wa war wo wuu xal xh "
    "xmf xsy yi yo yue za zea zgh zh zh-classical zh-cn zh-hans "
    "zh-hant zh-hk zh-min-nan zh-mo zh-my zh-sg zh-tw zh-yue zu"
).split()
def main_keyboard():
    """Build the main reply keyboard offering every top-level bot command."""
    markup = types.ReplyKeyboardMarkup(
        row_width=2, resize_keyboard=True, one_time_keyboard=True
    )
    labels = (
        "Definition 📖",
        "Title 🖊️️",
        "URL 🔗",
        "Language 🔣",
        "Random 🔀",
        "Help ⚠️",
        "Map 🗺️",
        "Nearby 📍",
    )
    markup.add(*(types.KeyboardButton(label) for label in labels))
    return markup
def support_keyboard():
    """Build the support-section reply keyboard (dev / bug / source / back)."""
    labels = ("🧑🏻💻️ Dev", "🐛 Bug", "💻️ Source", "🔙 Back")
    keyboard = types.ReplyKeyboardMarkup(
        row_width=2, resize_keyboard=True, one_time_keyboard=True
    )
    keyboard.add(*[types.KeyboardButton(label) for label in labels])
    return keyboard
def extra_keyboard():
    """Build the extra-commands reply keyboard (suggest / fluky / back)."""
    labels = ("Suggest 💡", "Fluky 💫", "Back ⬅️")
    keyboard = types.ReplyKeyboardMarkup(
        row_width=2, resize_keyboard=True, one_time_keyboard=True
    )
    keyboard.add(*[types.KeyboardButton(label) for label in labels])
    return keyboard
def check(text, command):
    """Return True when *command* appears as a word in *text*.

    The text is stripped of "/" and "#" characters and lowercased before
    being split on single spaces, so "/Start", "#start" and "start foo"
    all match the command "start".  Returning a bool instead of 1/0 is
    backward compatible (bool is an int subclass and all callers use the
    result as a truth value in handler predicates).
    """
    words = str(text).replace("/", "").replace("#", "").lower().split(" ")
    return command in words
def change_lan(message):
    """Switch the wikipedia client to the sender's stored language.

    Looks the sender up in the cached Firebase snapshot ``z``; raises
    KeyError when the user has never been registered (i.e. never ran
    /start or set a language).
    """
    user_id = str(message.from_user.id)
    set_lang(z[user_id])
@bot.message_handler(func=lambda message: check(message.text, "start"))
def welcome(message):
    """Register the sender with the default language ("en") and greet them."""
    ref.update({message.from_user.id: "en"})
    greeting = (
        "Greetings " + message.from_user.first_name + ", I am Wikibot 🤖\n\n"
        "What can I do? Use <b>help</b>."
    )
    bot.send_message(
        chat_id=message.chat.id,
        parse_mode="html",
        text=greeting,
        reply_markup=main_keyboard(),
    )
@bot.message_handler(func=lambda message: check(message.text, "definition"))
def definition(message):
    """Ask which word to define, then chain to process_definition."""
    prompt = bot.reply_to(message, "<b>Definition</b>" + word, parse_mode="html")
    bot.register_next_step_handler(prompt, process_definition)
def process_definition(message):
    """Reply with the first paragraph of a ten-sentence wiki summary.

    On failure, page-not-found errors (messages starting with "Page id")
    become a short "Not Found" notice, while disambiguation errors are
    relayed with the "may refer to" phrase bolded.
    """
    try:
        def_msg = str(message.text)
        change_lan(message)
        # Fetch up to ten sentences, then keep only the first paragraph.
        def_str = summary(def_msg, sentences=10)
        def_split = def_str.split("\n\n", 1)[0]
        bot.send_message(
            chat_id=message.chat.id,
            text="<b>" + def_msg + "</b> \n\n" + def_split,
            parse_mode="html",
            reply_markup=main_keyboard(),
        )
    except Exception as c:
        # Distinguish "no such page" from disambiguation by message prefix.
        if str(c)[:7] == "Page id":
            msg = "<b>Not Found!</b>"
        else:
            msg = str(c).replace("may refer to", "<b>may refer to</b>")
        bot.send_message(
            chat_id=message.chat.id,
            text=msg,
            parse_mode="html",
            reply_markup=main_keyboard(),
        )
@bot.message_handler(func=lambda message: check(message.text, "title"))
def title(message):
    """Ask which word to search titles for, then chain to process_title."""
    prompt = bot.reply_to(message, "<b>Title</b>" + word, parse_mode="html")
    bot.register_next_step_handler(prompt, process_title)
def process_title(message):
    """Send every wiki title matching the query, with the query bolded.

    Falls back to the generic ``error2`` hint when the search returns
    nothing or raises.
    """
    try:
        title_msg = str(message.text)
        change_lan(message)
        title_result = search(title_msg)
        if title_result:
            bot.send_message(
                chat_id=message.chat.id,
                text="Possible titles are...",
                parse_mode="html",
            )
            # Bold both the as-typed and lowercased forms of the query
            # wherever they appear inside each candidate title.
            for i in title_result:
                bot.send_message(
                    chat_id=message.chat.id,
                    text=i.replace(title_msg, "<b>" + title_msg + "</b>").replace(
                        title_msg.lower(), "<b>" + title_msg.lower() + "</b>"
                    ),
                    parse_mode="html",
                    reply_markup=main_keyboard(),
                )
        else:
            bot.send_message(
                chat_id=message.chat.id,
                text=error2,
                parse_mode="html",
                reply_markup=main_keyboard(),
            )
    except Exception:
        bot.send_message(
            chat_id=message.chat.id,
            text=error2,
            parse_mode="html",
            reply_markup=main_keyboard(),
        )
@bot.message_handler(func=lambda message: check(message.text, "help"))
def aid(message):
    """Send the help text listing all supported keywords."""
    text = (
        "These keywords can be used to control me - \n\n"
        "<b>Primary</b> \n"
        "Definition 📖 - fetches definition of a word \n"
        "Title 🖊️️ - fetches a bunch of related titles\n"
        "URL 🔗 - gives the URL of wiki page of the word \n"
        "Prefix 🔡 - show all available languages \n"
        "Language 🔣 - set the language you want \n\n"
        "<b>Secondary</b> \n"
        "Nearby 📍 - locations near a coordinate \n"
        "Map 🗺️ - location in map with wiki database \n"
        "Random 🔀 - pops a random article from wiki \n\n"
        "<b>Extra</b> \n"
        "Fluky 💫 - fetches a random title from wiki \n"
        "Suggest 💡 - returns a suggested word if found \n"
    )
    bot.send_message(
        chat_id=message.chat.id,
        text=text,
        parse_mode="html",
        reply_markup=main_keyboard(),
    )
@bot.message_handler(func=lambda message: check(message.text, "url"))
def url(message):
    """Ask which word to link, then chain to process_url."""
    prompt = bot.reply_to(message, "<b>URL</b>" + word, parse_mode="html")
    bot.register_next_step_handler(prompt, process_url)
def process_url(message):
    """Send the wiki page URL (as a link emoji) for the typed word.

    Falls back to the generic ``error`` hint when no page matches.
    """
    try:
        url_message = str(message.text)
        change_lan(message)
        url_page = page(url_message).url
        url_link = "<a href='" + url_page + "'>🔗</a>"
        bot.send_message(
            chat_id=message.chat.id,
            # Fix: a space was missing after the link, so the message
            # rendered as "🔗for <b>word</b>".
            text=url_link + " for <b>" + url_message + "</b>",
            parse_mode="html",
            reply_markup=main_keyboard(),
        )
    except Exception:
        bot.send_message(
            chat_id=message.chat.id,
            text=error,
            parse_mode="html",
            reply_markup=main_keyboard(),
        )
@bot.message_handler(func=lambda message: check(message.text, "language"))
def ln(message):
    """Ask for a language prefix, then chain to process_ln."""
    prompt = bot.reply_to(
        message, "Type the prefix of your <b>language</b>...", parse_mode="html"
    )
    bot.register_next_step_handler(prompt, process_ln)
def process_ln(message):
    """Validate the typed language prefix and persist it for the sender."""
    try:
        ln_msg = str(message.text).lower()
        if ln_msg in lang_list:
            user_id = message.from_user.id
            ref.update({user_id: str(message.text).lower()})
            # Refresh the module-level Firebase snapshot so change_lan()
            # immediately sees the new preference.
            global z
            z = ref.get()
            text = "Set Successfully."
        else:
            text = (
                "Please, check for the correct <a href="
                '"https://github.com/themagicalmammal/wikibot/blob/master/Lang.md"'
                ">prefix</a>."
            )
        bot.send_message(
            chat_id=message.chat.id,
            text=text,
            parse_mode="html",
            reply_markup=main_keyboard(),
        )
    except Exception:
        bot.send_message(
            chat_id=message.chat.id,
            text="Error, changing language",
            reply_markup=main_keyboard(),
        )
@bot.message_handler(func=lambda message: check(message.text, "support"))
def support(message):
    """List the support commands and switch to the support keyboard."""
    text = (
        "Support commands that I provide - \n\n"
        "Bugs 🐛 - to report bugs or suggest mods\n"
        "Dev 🧑🏻💻️ - provides information about my creator\n"
        "Source 💻️ - to view the source code"
    )
    bot.send_message(
        chat_id=message.chat.id,
        text=text,
        parse_mode="html",
        reply_markup=support_keyboard(),
    )
@bot.message_handler(func=lambda message: check(message.text, "prefix"))
def prefix(message):
    """Explain language prefixes and link to the full prefix table."""
    text = (
        "Language is set with the help of it's Prefix. \n<b>Example</b> - English:en<a "
        'href="https://github.com/themagicalmammal/wikibot/blob/master/Lang.md"'
        ">.</a>"
    )
    bot.send_message(
        chat_id=message.chat.id,
        text=text,
        parse_mode="html",
        reply_markup=main_keyboard(),
    )
@bot.message_handler(func=lambda message: check(message.text, "random"))
def randomize(message):
    """Send a link to a random wiki article in the sender's language.

    Retries a few times when the random lookup fails.  The previous
    implementation recursed on a bare ``except:``, which (a) also caught
    KeyboardInterrupt/SystemExit and (b) could exhaust the stack under
    repeated failures; a bounded loop avoids both.
    """
    for _ in range(5):
        try:
            change_lan(message)
            random_title = page(random(pages=1)).url
            random_text = "<a href='" + random_title + "'>✨</a>"
            bot.send_message(
                chat_id=message.chat.id,
                text=random_text,
                parse_mode="html",
                reply_markup=main_keyboard(),
            )
            return
        except Exception:
            # Random pages occasionally resolve to broken/ambiguous
            # entries; just try another one.
            continue
@bot.message_handler(func=lambda message: check(message.text, "map"))
def chart(message):
    """Ask for a place name, then chain to process_co."""
    prompt = bot.reply_to(message, "<b>Location</b> of the place...", parse_mode="html")
    bot.register_next_step_handler(prompt, process_co)
def process_co(message):
    """Send the coordinates and a map pin for the named place.

    Replies "Not a location." when the page has no coordinate data.
    """
    try:
        co_msg = str(message.text)
        # Forces the English wiki for the lookup — presumably for coordinate
        # coverage; confirm this is intended over the user's language.
        set_lang("en")
        lat, lon = WikipediaPage(co_msg).coordinates
        bot.send_message(
            chat_id=message.chat.id, text=str(round(lat, 5)) + ", " + str(round(lon, 5))
        )
        bot.send_location(
            chat_id=message.chat.id,
            latitude=lat,
            longitude=lon,
            reply_markup=main_keyboard(),
        )
    except Exception:
        bot.send_message(
            chat_id=message.chat.id,
            text="Not a location.",
            reply_markup=main_keyboard(),
        )
@bot.message_handler(func=lambda message: check(message.text, "nearby"))
def geo(message):
    """Ask for coordinates, then chain to process_geo."""
    prompt = bot.reply_to(
        message, "Send me the <b>coordinates</b>...", parse_mode="html"
    )
    bot.register_next_step_handler(prompt, process_geo)
def process_geo(message):
    """List up to ten wiki-known locations within 1 km of the coordinates.

    The input is normalised by stripping hemisphere letters (E/W/N/S),
    degree signs and commas, so strings like "48.85° N, 2.35° E" reduce to
    two plain numbers before splitting on a space.
    """
    try:
        lat, lan = (
            str(message.text)
            .replace("E", "")
            .replace("W", "")
            .replace("N", "")
            .replace("S", "")
            .replace("° ", "")
            .replace("°", "")
            .replace(",", "")
            # NOTE(review): this replace appears to swap a space for a
            # space (a no-op) — possibly meant to collapse double spaces
            # left by the removals above; confirm the intended arguments.
            .replace(" ", " ")
            .split(" ")
        )
        set_lang("en")
        locations = geosearch(latitude=lat, longitude=lan, results=10, radius=1000)
        if locations:
            nearby = "<b>Nearby locations</b> are..."
            bot.send_message(
                chat_id=message.chat.id,
                text=nearby,
                parse_mode="html",
                reply_markup=main_keyboard(),
            )
            for i in locations:
                bot.send_message(
                    chat_id=message.chat.id, text=i, reply_markup=main_keyboard()
                )
        else:
            sorry = "Sorry, can't find nearby locations"
            bot.send_message(
                chat_id=message.chat.id, text=sorry, reply_markup=main_keyboard()
            )
    except Exception:
        # Covers both unpack failures (input not exactly two tokens) and
        # geosearch errors.
        bot.send_message(
            chat_id=message.chat.id,
            text="Use <b>Map</b> to get coordinates.",
            parse_mode="html",
            reply_markup=main_keyboard(),
        )
@bot.message_handler(func=lambda message: check(message.text, "fluky"))
def fluky(message):
    """Send a random wiki article title in the sender's language."""
    # NOTE(review): unlike the other handlers this one is not wrapped in
    # try/except, so change_lan() raises KeyError for users missing from
    # the Firebase snapshot — confirm whether that is intended.
    change_lan(message)
    fluky_title = random(pages=1)
    bot.send_message(
        chat_id=message.chat.id, text=fluky_title, reply_markup=extra_keyboard()
    )
@bot.message_handler(func=lambda message: check(message.text, "back"))
def back(message):
    """Return the sender to the main command keyboard."""
    bot.send_message(
        chat_id=message.chat.id,
        parse_mode="html",
        text="<b>Commands</b>",
        reply_markup=main_keyboard(),
    )
@bot.message_handler(func=lambda message: check(message.text, "suggest"))
def suggestion(message):
    """Ask which word to get a suggestion for, then chain to process_suggest."""
    prompt = bot.reply_to(
        message, "<b>Suggestion</b> for the word...", parse_mode="html"
    )
    bot.register_next_step_handler(prompt, process_suggest)
def process_suggest(message):
    """Reply with wikipedia's spelling suggestion for the typed word.

    Falls back to a "no suggestions" notice when the library returns
    None or the lookup fails entirely.
    """
    sorry = "Sorry, <b>no suggestions.</b>"
    try:
        suggest_msg = str(message.text)
        change_lan(message)
        # suggest() yields None when nothing matches.  The old code
        # compared str(result) to the string "None", which would also
        # misfire on a genuine suggestion equal to that literal.
        result = suggest(suggest_msg)
        text = str(result) if result is not None else sorry
        bot.send_message(
            chat_id=message.chat.id,
            text=text,
            parse_mode="html",
            reply_markup=extra_keyboard(),
        )
    except Exception:
        bot.send_message(
            chat_id=message.chat.id,
            text=sorry,
            parse_mode="html",
            reply_markup=extra_keyboard(),
        )
@bot.message_handler(func=lambda message: check(message.text, "bug"))
def bug(message):
    """Link to the GitHub issue tracker for bug reports."""
    text = (
        "Submit a Issue or Revision<a "
        'href="https://github.com/themagicalmammal/wikibot/issues">.</a> '
    )
    bot.send_message(
        chat_id=message.chat.id,
        text=text,
        parse_mode="html",
        reply_markup=support_keyboard(),
    )
@bot.message_handler(func=lambda message: check(message.text, "dev"))
def dev(message):
    """Link to the author's GitHub profile."""
    text = (
        "I was made with ❤ by @themagicalmammal"
        '<a href="https://github.com/themagicalmammal">.</a>'
    )
    bot.send_message(
        chat_id=message.chat.id,
        text=text,
        parse_mode="html",
        reply_markup=support_keyboard(),
    )
@bot.message_handler(func=lambda message: check(message.text, "source"))
def source(message):
    """Link to the bot's source repository."""
    text = (
        "Checkout out How I was made"
        '<a href="https://github.com/themagicalmammal/wikibot">.</a>'
    )
    bot.send_message(
        chat_id=message.chat.id,
        text=text,
        parse_mode="html",
        reply_markup=support_keyboard(),
    )
@bot.message_handler(func=lambda message: check(message.text, "extra"))
def extra(message):
    """Switch the sender to the extra-commands keyboard."""
    bot.send_message(
        chat_id=message.chat.id,
        parse_mode="html",
        text="<b>Extra</b>",
        reply_markup=extra_keyboard(),
    )
@bot.message_handler(func=lambda message: True)
def unrecognized(message):
    """Catch-all handler: prompt for a known keyword.

    Matches everything (func=True); presumably relies on telebot checking
    handlers in registration order so the specific handlers above win —
    confirm before reordering handlers.
    """
    bot.send_message(
        chat_id=message.chat.id,
        text="<b>Please</b>, use a keyword",
        parse_mode="html",
        reply_markup=main_keyboard(),
    )
# Heroku Connection: incoming webhook updates are POSTed to /<TOKEN>.
@server.route("/" + TOKEN, methods=["POST"])
def establish():
    """Decode the raw webhook payload and hand it to the bot dispatcher."""
    payload = request.stream.read().decode("utf-8")
    bot.process_new_updates([types.Update.de_json(payload)])
    return "!", 200
@server.route("/")
def webhook():
bot.remove_webhook()
bot.set_webhook(url="https://yourappname.herokuapp.com/" + TOKEN)
return "!", 200
if __name__ == "__main__":
server.run(host="0.0.0.0", port=int(environ.get("PORT", 5000)))
| 20.595358 | 88 | 0.502303 | from os import environ
from firebase_admin import credentials, db, initialize_app
from flask import Flask, request
from telebot import TeleBot, types
from wikipedia import (
WikipediaPage,
geosearch,
page,
random,
search,
set_lang,
suggest,
summary,
)
cred = credentials.Certificate("firebase.json")
initialize_app(
cred, {"databaseURL": "https://yourappname-user-default-rtdb.firebaseio.com/"}
)
ref = db.reference("/")
z = ref.get()
TOKEN = ""
bot = TeleBot(TOKEN)
server = Flask(__name__)
error = "Wrong word, use <b>title</b>"
error2 = "Wrong word, use <b>suggest</b>"
word = " for the word..."
lang_list = [
"aa",
"ab",
"abs",
"ace",
"ady",
"ady-cyrl",
"aeb",
"aeb-arab",
"aeb-latn",
"af",
"ak",
"aln",
"als",
"alt",
"am",
"ami",
"an",
"ang",
"anp",
"ar",
"arc",
"arn",
"arq",
"ary",
"arz",
"as",
"ase",
"ast",
"atj",
"av",
"avk",
"awa",
"ay",
"az",
"azb",
"ba",
"ban",
"ban-bali",
"bar",
"bat-smg",
"bbc",
"bbc-latn",
"bcc",
"bcl",
"be",
"be-tarask",
"be-x-old",
"bg",
"bgn",
"bh",
"bho",
"bi",
"bjn",
"bm",
"bn",
"bo",
"bpy",
"bqi",
"br",
"brh",
"bs",
"btm",
"bto",
"bug",
"bxr",
"ca",
"cbk-zam",
"cdo",
"ce",
"ceb",
"ch",
"cho",
"chr",
"chy",
"ckb",
"co",
"cps",
"cr",
"crh",
"crh-cyrl",
"crh-latn",
"cs",
"csb",
"cu",
"cv",
"cy",
"da",
"de",
"de-at",
"de-ch",
"de-formal",
"din",
"diq",
"dsb",
"dtp",
"dty",
"dv",
"dz",
"ee",
"egl",
"el",
"eml",
"en",
"en-ca",
"en-gb",
"eo",
"es",
"es-419",
"es-formal",
"et",
"eu",
"ext",
"fa",
"ff",
"fi",
"fit",
"fiu-vro",
"fj",
"fo",
"fr",
"frc",
"frp",
"frr",
"fur",
"fy",
"ga",
"gag",
"gan",
"gan-hans",
"gan-hant",
"gcr",
"gd",
"gl",
"glk",
"gn",
"gom",
"gom-deva",
"gom-latn",
"gor",
"got",
"grc",
"gsw",
"gu",
"gv",
"ha",
"hak",
"haw",
"he",
"hi",
"hif",
"hif-latn",
"hil",
"ho",
"hr",
"hrx",
"hsb",
"ht",
"hu",
"hu-formal",
"hy",
"hyw",
"hz",
"ia",
"id",
"ie",
"ig",
"ii",
"ik",
"ike-cans",
"ike-latn",
"ilo",
"inh",
"io",
"is",
"it",
"iu",
"ja",
"jam",
"jbo",
"jut",
"jv",
"ka",
"kaa",
"kab",
"kbd",
"kbd-cyrl",
"kbp",
"kg",
"khw",
"ki",
"kiu",
"kj",
"kjp",
"kk",
"kk-arab",
"kk-cn",
"kk-cyrl",
"kk-kz",
"kk-latn",
"kk-tr",
"kl",
"km",
"kn",
"ko",
"ko-kp",
"koi",
"kr",
"krc",
"kri",
"krj",
"krl",
"ks",
"ks-arab",
"ks-deva",
"ksh",
"ku",
"ku-arab",
"ku-latn",
"kum",
"kv",
"kw",
"ky",
"la",
"lad",
"lb",
"lbe",
"lez",
"lfn",
"lg",
"li",
"lij",
"liv",
"lki",
"lld",
"lmo",
"ln",
"lo",
"loz",
"lrc",
"lt",
"ltg",
"lus",
"luz",
"lv",
"lzh",
"lzz",
"mad",
"mai",
"map-bms",
"mdf",
"mg",
"mh",
"mhr",
"mi",
"min",
"mk",
"ml",
"mn",
"mni",
"mnw",
"mo",
"mr",
"mrh",
"mrj",
"ms",
"mt",
"mus",
"mwl",
"my",
"myv",
"mzn",
"na",
"nah",
"nan",
"nap",
"nb",
"nds",
"nds-nl",
"ne",
"new",
"ng",
"nia",
"niu",
"nl",
"nl-informal",
"nn",
"no",
"nov",
"nqo",
"nrm",
"nso",
"nv",
"ny",
"nys",
"oc",
"olo",
"om",
"or",
"os",
"pa",
"pag",
"pam",
"pap",
"pcd",
"pdc",
"pdt",
"pfl",
"pi",
"pih",
"pl",
"pms",
"pnb",
"pnt",
"prg",
"ps",
"pt",
"pt-br",
"qu",
"qug",
"rgn",
"rif",
"rm",
"rmy",
"rn",
"ro",
"roa-rup",
"roa-tara",
"ru",
"rue",
"rup",
"ruq",
"ruq-cyrl",
"ruq-latn",
"rw",
"sa",
"sah",
"sat",
"sc",
"scn",
"sco",
"sd",
"sdc",
"sdh",
"se",
"sei",
"ses",
"sg",
"sgs",
"sh",
"shi",
"shi-latn",
"shi-tfng",
"shn",
"shy-latn",
"si",
"simple",
"sk",
"skr",
"skr-arab",
"sl",
"sli",
"sm",
"sma",
"smn",
"sn",
"so",
"sq",
"sr",
"sr-ec",
"sr-el",
"srn",
"ss",
"st",
"stq",
"sty",
"su",
"sv",
"sw",
"szl",
"szy",
"ta",
"tay",
"tcy",
"te",
"tet",
"tg",
"tg-cyrl",
"tg-latn",
"th",
"ti",
"tk",
"tl",
"tly",
"tn",
"to",
"tpi",
"tr",
"tru",
"trv",
"ts",
"tt",
"tt-cyrl",
"tt-latn",
"tum",
"tw",
"ty",
"tyv",
"tzm",
"udm",
"ug",
"ug-arab",
"ug-latn",
"uk",
"ur",
"uz",
"uz-cyrl",
"uz-latn",
"ve",
"vec",
"vep",
"vi",
"vls",
"vmf",
"vo",
"vot",
"vro",
"wa",
"war",
"wo",
"wuu",
"xal",
"xh",
"xmf",
"xsy",
"yi",
"yo",
"yue",
"za",
"zea",
"zgh",
"zh",
"zh-classical",
"zh-cn",
"zh-hans",
"zh-hant",
"zh-hk",
"zh-min-nan",
"zh-mo",
"zh-my",
"zh-sg",
"zh-tw",
"zh-yue",
"zu",
]
def main_keyboard():
markup = types.ReplyKeyboardMarkup(
row_width=2, resize_keyboard=True, one_time_keyboard=True
)
texts = [
"Definition 📖",
"Title 🖊️️",
"URL 🔗",
"Language 🔣",
"Random 🔀",
"Help ⚠️",
"Map 🗺️",
"Nearby 📍",
]
buttons = []
for text in texts:
button = types.KeyboardButton(text)
buttons.append(button)
markup.add(*buttons)
return markup
def support_keyboard():
markup = types.ReplyKeyboardMarkup(
row_width=2, resize_keyboard=True, one_time_keyboard=True
)
texts = ["🧑🏻💻️ Dev", "🐛 Bug", "💻️ Source", "🔙 Back"]
buttons = []
for text in texts:
button = types.KeyboardButton(text)
buttons.append(button)
markup.add(*buttons)
return markup
def extra_keyboard():
markup = types.ReplyKeyboardMarkup(
row_width=2, resize_keyboard=True, one_time_keyboard=True
)
texts = ["Suggest 💡", "Fluky 💫", "Back ⬅️"]
buttons = []
for text in texts:
button = types.KeyboardButton(text)
buttons.append(button)
markup.add(*buttons)
return markup
def check(text, command):
checker = str(text).replace("/", "").replace("#", "").lower().split(" ")
if command in checker:
return 1
return 0
def change_lan(message):
user_id = str(message.from_user.id)
set_lang(z[user_id])
@bot.message_handler(func=lambda message: check(message.text, "start"))
def welcome(message):
user_id = message.from_user.id
ref.update({user_id: "en"})
welcome_msg = (
"Greetings " + message.from_user.first_name + ", I am Wikibot 🤖\n\n"
"What can I do? Use <b>help</b>."
)
bot.send_message(
chat_id=message.chat.id,
text=welcome_msg,
parse_mode="html",
reply_markup=main_keyboard(),
)
@bot.message_handler(func=lambda message: check(message.text, "definition"))
def definition(message):
def_msg = bot.reply_to(message, "<b>Definition</b>" + word, parse_mode="html")
bot.register_next_step_handler(def_msg, process_definition)
def process_definition(message):
try:
def_msg = str(message.text)
change_lan(message)
def_str = summary(def_msg, sentences=10)
def_split = def_str.split("\n\n", 1)[0]
bot.send_message(
chat_id=message.chat.id,
text="<b>" + def_msg + "</b> \n\n" + def_split,
parse_mode="html",
reply_markup=main_keyboard(),
)
except Exception as c:
if str(c)[:7] == "Page id":
msg = "<b>Not Found!</b>"
else:
msg = str(c).replace("may refer to", "<b>may refer to</b>")
bot.send_message(
chat_id=message.chat.id,
text=msg,
parse_mode="html",
reply_markup=main_keyboard(),
)
@bot.message_handler(func=lambda message: check(message.text, "title"))
def title(message):
title_msg = bot.reply_to(message, "<b>Title</b>" + word, parse_mode="html")
bot.register_next_step_handler(title_msg, process_title)
def process_title(message):
try:
title_msg = str(message.text)
change_lan(message)
title_result = search(title_msg)
if title_result:
bot.send_message(
chat_id=message.chat.id,
text="Possible titles are...",
parse_mode="html",
)
for i in title_result:
bot.send_message(
chat_id=message.chat.id,
text=i.replace(title_msg, "<b>" + title_msg + "</b>").replace(
title_msg.lower(), "<b>" + title_msg.lower() + "</b>"
),
parse_mode="html",
reply_markup=main_keyboard(),
)
else:
bot.send_message(
chat_id=message.chat.id,
text=error2,
parse_mode="html",
reply_markup=main_keyboard(),
)
except Exception:
bot.send_message(
chat_id=message.chat.id,
text=error2,
parse_mode="html",
reply_markup=main_keyboard(),
)
@bot.message_handler(func=lambda message: check(message.text, "help"))
def aid(message):
text = (
"These keywords can be used to control me - \n\n"
"<b>Primary</b> \n"
"Definition 📖 - fetches definition of a word \n"
"Title 🖊️️ - fetches a bunch of related titles\n"
"URL 🔗 - gives the URL of wiki page of the word \n"
"Prefix 🔡 - show all available languages \n"
"Language 🔣 - set the language you want \n\n"
"<b>Secondary</b> \n"
"Nearby 📍 - locations near a coordinate \n"
"Map 🗺️ - location in map with wiki database \n"
"Random 🔀 - pops a random article from wiki \n\n"
"<b>Extra</b> \n"
"Fluky 💫 - fetches a random title from wiki \n"
"Suggest 💡 - returns a suggested word if found \n"
)
bot.send_message(
chat_id=message.chat.id,
text=text,
parse_mode="html",
reply_markup=main_keyboard(),
)
@bot.message_handler(func=lambda message: check(message.text, "url"))
def url(message):
url_msg = bot.reply_to(message, "<b>URL</b>" + word, parse_mode="html")
bot.register_next_step_handler(url_msg, process_url)
def process_url(message):
try:
url_message = str(message.text)
change_lan(message)
url_page = page(url_message).url
url_link = "<a href='" + url_page + "'>🔗</a>"
bot.send_message(
chat_id=message.chat.id,
text=url_link + "for <b>" + url_message + "</b>",
parse_mode="html",
reply_markup=main_keyboard(),
)
except Exception:
bot.send_message(
chat_id=message.chat.id,
text=error,
parse_mode="html",
reply_markup=main_keyboard(),
)
@bot.message_handler(func=lambda message: check(message.text, "language"))
def ln(message):
ln_msg = bot.reply_to(
message, "Type the prefix of your <b>language</b>...", parse_mode="html"
)
bot.register_next_step_handler(ln_msg, process_ln)
def process_ln(message):
try:
ln_msg = str(message.text).lower()
if ln_msg in lang_list:
user_id = message.from_user.id
ref.update({user_id: str(message.text).lower()})
global z
z = ref.get()
text = "Set Successfully."
else:
text = (
"Please, check for the correct <a href="
'"https://github.com/themagicalmammal/wikibot/blob/master/Lang.md"'
">prefix</a>."
)
bot.send_message(
chat_id=message.chat.id,
text=text,
parse_mode="html",
reply_markup=main_keyboard(),
)
except Exception:
bot.send_message(
chat_id=message.chat.id,
text="Error, changing language",
reply_markup=main_keyboard(),
)
@bot.message_handler(func=lambda message: check(message.text, "support"))
def support(message):
text = (
"Support commands that I provide - \n\n"
"Bugs 🐛 - to report bugs or suggest mods\n"
"Dev 🧑🏻💻️ - provides information about my creator\n"
"Source 💻️ - to view the source code"
)
bot.send_message(
chat_id=message.chat.id,
text=text,
parse_mode="html",
reply_markup=support_keyboard(),
)
@bot.message_handler(func=lambda message: check(message.text, "prefix"))
def prefix(message):
text = (
"Language is set with the help of it's Prefix. \n<b>Example</b> - English:en<a "
'href="https://github.com/themagicalmammal/wikibot/blob/master/Lang.md"'
">.</a>"
)
bot.send_message(
chat_id=message.chat.id,
text=text,
parse_mode="html",
reply_markup=main_keyboard(),
)
@bot.message_handler(func=lambda message: check(message.text, "random"))
def randomize(message):
try:
change_lan(message)
random_title = page(random(pages=1)).url
random_text = "<a href='" + random_title + "'>✨</a>"
bot.send_message(
chat_id=message.chat.id,
text=random_text,
parse_mode="html",
reply_markup=main_keyboard(),
)
except:
randomize(message)
@bot.message_handler(func=lambda message: check(message.text, "map"))
def chart(message):
co_msg = bot.reply_to(message, "<b>Location</b> of the place...", parse_mode="html")
bot.register_next_step_handler(co_msg, process_co)
def process_co(message):
try:
co_msg = str(message.text)
set_lang("en")
lat, lon = WikipediaPage(co_msg).coordinates
bot.send_message(
chat_id=message.chat.id, text=str(round(lat, 5)) + ", " + str(round(lon, 5))
)
bot.send_location(
chat_id=message.chat.id,
latitude=lat,
longitude=lon,
reply_markup=main_keyboard(),
)
except Exception:
bot.send_message(
chat_id=message.chat.id,
text="Not a location.",
reply_markup=main_keyboard(),
)
@bot.message_handler(func=lambda message: check(message.text, "nearby"))
def geo(message):
geo_msg = bot.reply_to(
message, "Send me the <b>coordinates</b>...", parse_mode="html"
)
bot.register_next_step_handler(geo_msg, process_geo)
def process_geo(message):
try:
lat, lan = (
str(message.text)
.replace("E", "")
.replace("W", "")
.replace("N", "")
.replace("S", "")
.replace("° ", "")
.replace("°", "")
.replace(",", "")
.replace(" ", " ")
.split(" ")
)
set_lang("en")
locations = geosearch(latitude=lat, longitude=lan, results=10, radius=1000)
if locations:
nearby = "<b>Nearby locations</b> are..."
bot.send_message(
chat_id=message.chat.id,
text=nearby,
parse_mode="html",
reply_markup=main_keyboard(),
)
for i in locations:
bot.send_message(
chat_id=message.chat.id, text=i, reply_markup=main_keyboard()
)
else:
sorry = "Sorry, can't find nearby locations"
bot.send_message(
chat_id=message.chat.id, text=sorry, reply_markup=main_keyboard()
)
except Exception:
bot.send_message(
chat_id=message.chat.id,
text="Use <b>Map</b> to get coordinates.",
parse_mode="html",
reply_markup=main_keyboard(),
)
@bot.message_handler(func=lambda message: check(message.text, "fluky"))
def fluky(message):
change_lan(message)
fluky_title = random(pages=1)
bot.send_message(
chat_id=message.chat.id, text=fluky_title, reply_markup=extra_keyboard()
)
@bot.message_handler(func=lambda message: check(message.text, "back"))
def back(message):
bot.send_message(
chat_id=message.chat.id,
text="<b>Commands</b>",
parse_mode="html",
reply_markup=main_keyboard(),
)
@bot.message_handler(func=lambda message: check(message.text, "suggest"))
def suggestion(message):
suggest_msg = bot.reply_to(
message, "<b>Suggestion</b> for the word...", parse_mode="html"
)
bot.register_next_step_handler(suggest_msg, process_suggest)
def process_suggest(message):
sorry = "Sorry, <b>no suggestions.</b>"
try:
suggest_msg = str(message.text)
change_lan(message)
suggest_str = str(suggest(suggest_msg))
if suggest_str != "None":
text = suggest_str
else:
text = sorry
bot.send_message(
chat_id=message.chat.id,
text=text,
parse_mode="html",
reply_markup=extra_keyboard(),
)
except Exception:
bot.send_message(
chat_id=message.chat.id,
text=sorry,
parse_mode="html",
reply_markup=extra_keyboard(),
)
@bot.message_handler(func=lambda message: check(message.text, "bug"))
def bug(message):
text = (
"Submit a Issue or Revision<a "
'href="https://github.com/themagicalmammal/wikibot/issues">.</a> '
)
bot.send_message(
chat_id=message.chat.id,
text=text,
parse_mode="html",
reply_markup=support_keyboard(),
)
@bot.message_handler(func=lambda message: check(message.text, "dev"))
def dev(message):
text = (
"I was made with ❤ by @themagicalmammal"
'<a href="https://github.com/themagicalmammal">.</a>'
)
bot.send_message(
chat_id=message.chat.id,
text=text,
parse_mode="html",
reply_markup=support_keyboard(),
)
@bot.message_handler(func=lambda message: check(message.text, "source"))
def source(message):
text = (
"Checkout out How I was made"
'<a href="https://github.com/themagicalmammal/wikibot">.</a>'
)
bot.send_message(
chat_id=message.chat.id,
text=text,
parse_mode="html",
reply_markup=support_keyboard(),
)
@bot.message_handler(func=lambda message: check(message.text, "extra"))
def extra(message):
bot.send_message(
chat_id=message.chat.id,
text="<b>Extra</b>",
parse_mode="html",
reply_markup=extra_keyboard(),
)
@bot.message_handler(func=lambda message: True)
def unrecognized(message):
bot.send_message(
chat_id=message.chat.id,
text="<b>Please</b>, use a keyword",
parse_mode="html",
reply_markup=main_keyboard(),
)
@server.route("/" + TOKEN, methods=["POST"])
def establish():
bot.process_new_updates(
[types.Update.de_json(request.stream.read().decode("utf-8"))]
)
return "!", 200
@server.route("/")
def webhook():
bot.remove_webhook()
bot.set_webhook(url="https://yourappname.herokuapp.com/" + TOKEN)
return "!", 200
if __name__ == "__main__":
server.run(host="0.0.0.0", port=int(environ.get("PORT", 5000)))
| true | true |
f71edc861b7845660a5affbfdfb2cda3e677aa82 | 2,517 | py | Python | src/process_event_api.py | AndreasVikke/ComputerScience-Final | 52d09a5876bfde661a00736712db6e3d19be877d | [
"MIT"
] | 1 | 2021-01-15T11:23:20.000Z | 2021-01-15T11:23:20.000Z | src/process_event_api.py | AndreasVikke/ComputerScience-Final | 52d09a5876bfde661a00736712db6e3d19be877d | [
"MIT"
] | null | null | null | src/process_event_api.py | AndreasVikke/ComputerScience-Final | 52d09a5876bfde661a00736712db6e3d19be877d | [
"MIT"
] | null | null | null | """
Processes event api from slack
:license: MIT
"""
import json
import os
from typing import Dict
from src.modules.create_signedup_homepage import create_home_tap
from src.dependencies.dependency_typing import Requests, PynamoDBConsultant
from src.dependencies.requests_provider import get_requests_provider
from src.dependencies.pynamodb_consultant_provider import get_consultants_provider
def process(event, context):
    '''
    AWS Serverless Handler: wire up the HTTP and DynamoDB dependencies
    and delegate to proccess_request.
    -
    :param event: AWS event
    :param context: AWS Lambda context
    '''
    print(event)
    print(context)
    return proccess_request(
        event, get_requests_provider(), get_consultants_provider()
    )
def proccess_request(event, requests_client: Requests,
                     consultant_model: PynamoDBConsultant) -> None:
    '''
    Process a Slack Events-API request.
    -
    :param event: AWS event
    :param requests_client: Request Client
    :param consultant_model: Consultant Client
    :return: the Slack API response, the URL-verification challenge dict,
        or None for event types this handler does not recognise
    '''
    # Fix: previously ``response`` was assigned only inside the two handled
    # branches, so any other event type raised UnboundLocalError at the
    # final print/return.
    response = None
    event_body = event['body']
    if event_body['type'] == 'event_callback':
        if 'event' in event_body and event_body['event']['type'] == 'app_home_opened':
            user_id = event_body['event']['user']
            consultant = next(consultant_model.slack_id_index.query(user_id), None)
            if consultant is not None:
                home_tap = create_home_tap(consultant.uuid, consultant_model)
            else:
                # Unknown user: fall back to the static sign-up home tab.
                with open("src/templates/{0}.json".format('home_tap_template_signup'), "r")\
                        as body:
                    home_tap = json.load(body)
            data = {
                'user_id': user_id,
                'view': home_tap
            }
            response = post('https://slack.com/api/views.publish', data, requests_client)
    elif event_body['type'] == 'url_verification':
        # Slack endpoint handshake: echo the challenge back.
        response = {
            'challenge': event_body['challenge']
        }
    print(response)
    return response
def post(url: str, data: Dict, requests_client: Requests) -> Dict:
    '''
    POST *data* as JSON to the Slack Web API and return the decoded body.
    -
    :param url: Url to slack api
    :param data: The data to post
    :param requests_client: Request client
    :return: decoded JSON response body
    '''
    # Bot token is injected through the Lambda environment.
    auth_token = os.environ['SlackAuth']
    hed = {'Authorization': 'Bearer ' + auth_token}
    response = requests_client.post(url, json=data, headers=hed)
    print('RESPONSE: ', response.json())
    return response.json()
| 32.269231 | 92 | 0.636075 | import json
import os
from typing import Dict
from src.modules.create_signedup_homepage import create_home_tap
from src.dependencies.dependency_typing import Requests, PynamoDBConsultant
from src.dependencies.requests_provider import get_requests_provider
from src.dependencies.pynamodb_consultant_provider import get_consultants_provider
def process(event, context):
print(event)
print(context)
requests_client = get_requests_provider()
consultant_model = get_consultants_provider()
return proccess_request(event, requests_client, consultant_model)
def proccess_request(event, requests_client: Requests,
consultant_model: PynamoDBConsultant) -> None:
event_body = event['body']
if event_body['type'] == 'event_callback':
if 'event' in event_body and event_body['event']['type'] == 'app_home_opened':
user_id = event_body['event']['user']
consultant = next(consultant_model.slack_id_index.query(user_id), None)
if consultant is not None:
home_tap = create_home_tap(consultant.uuid, consultant_model)
else:
with open("src/templates/{0}.json".format('home_tap_template_signup'), "r")\
as body:
home_tap = json.load(body)
data = {
'user_id': user_id,
'view': home_tap
}
response = post('https://slack.com/api/views.publish', data, requests_client)
elif event_body['type'] == 'url_verification':
response = {
'challenge': event_body['challenge']
}
print(response)
return response
def post(url: str, data: Dict, requests_client: Requests) -> Requests:
auth_token = os.environ['SlackAuth']
hed = {'Authorization': 'Bearer ' + auth_token}
response = requests_client.post(url, json=data, headers=hed)
print('RESPONSE: ', response.json())
return response.json()
| true | true |
f71eddda4654e992cc72ae838a6aad60385ed3a6 | 2,393 | py | Python | authosm/backend.py | TCSOSM-20/LW-UI | 70c3331278f71d3b22fc3a090d526b4b8106d155 | [
"Apache-2.0"
] | null | null | null | authosm/backend.py | TCSOSM-20/LW-UI | 70c3331278f71d3b22fc3a090d526b4b8106d155 | [
"Apache-2.0"
] | null | null | null | authosm/backend.py | TCSOSM-20/LW-UI | 70c3331278f71d3b22fc3a090d526b4b8106d155 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2018 EveryUP Srl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from django.core.exceptions import PermissionDenied
from .models import OsmUser
from lib.osm.osmclient.clientv2 import Client
from .exceptions import OSMAuthException
class OsmBackend(object):
    """Django authentication backend that delegates credential checks to OSM.

    On success the OSM token/project data is mirrored into the local
    ``OsmUser`` record so the UI can reuse it.
    """

    def authenticate(self, **kwargs):
        """Authenticate against OSM and sync the local OsmUser row.

        :param kwargs: must contain ``username``, ``password`` and
            ``project_id``; the whole dict is forwarded to the OSM client.
        :return: the up-to-date ``OsmUser``, or ``None`` when a required
            argument is missing.
        :raises OSMAuthException: when OSM rejects the credentials.
        """
        if not all(k in kwargs for k in ('username', 'password', 'project_id')):
            return None

        username = kwargs['username']
        password = kwargs['password']

        client = Client()
        result = client.auth(kwargs)
        if 'error' in result and result['error'] is True:
            raise OSMAuthException(result['data'])

        data = result['data']
        try:
            user = OsmUser.objects.get(username=username)
        except OsmUser.DoesNotExist:
            user = OsmUser(username=username)
        # Fix: the create path previously skipped ``project_name`` and
        # stored ``admin`` without bool coercion, diverging from the
        # update path.  Both paths now write the same fields.
        user.psw = password
        user.token = data['id']
        user.project_id = data['project_id']
        user.project_name = data['project_name']
        user.token_expires = data['expires']
        user.is_admin = bool(data['admin'])
        user.save()
        return user

    def get_user(self, user_id):
        """Return the OsmUser for *user_id*, or None (Django backend API)."""
        try:
            return OsmUser.objects.get(pk=user_id)
        except OsmUser.DoesNotExist:
            return None
| 36.257576 | 109 | 0.587547 |
from django.core.exceptions import PermissionDenied
from .models import OsmUser
from lib.osm.osmclient.clientv2 import Client
from .exceptions import OSMAuthException
class OsmBackend(object):
    """Authentication backend that validates credentials via the OSM client."""

    def authenticate(self, **kwargs):
        """Authenticate against OSM; ``kwargs`` should carry ``username``,
        ``password`` and ``project_id``. Returns an ``OsmUser`` on success,
        ``None`` when the required keys are missing."""
        required = ('username', 'password', 'project_id')
        if not all(key in kwargs for key in required):
            return None
        username = kwargs['username']
        password = kwargs['password']
        session = Client().auth(kwargs)
        if 'error' in session and session['error'] is True:
            raise OSMAuthException(session['data'])
        payload = session['data']
        try:
            user = OsmUser.objects.get(username=username)
        except OsmUser.DoesNotExist:
            # No local record yet: create one from the OSM session data.
            user = OsmUser(username=username, psw=password, token=payload['id'],
                           project_id=payload['project_id'],
                           token_expires=payload['expires'],
                           is_admin=payload['admin'])
        else:
            # Existing record: refresh every session-derived field.
            user.psw = password
            user.token = payload['id']
            user.project_id = payload['project_id']
            user.project_name = payload['project_name']
            user.token_expires = payload['expires']
            user.is_admin = bool(payload['admin'])
        user.save()
        return user

    def get_user(self, user_id):
        """Look up an OsmUser by primary key; None when no such user exists."""
        try:
            found = OsmUser.objects.get(pk=user_id)
        except OsmUser.DoesNotExist:
            found = None
        return found
| true | true |
f71edf88cfb79a1cb25ad3a7be93346ae19cb1ee | 1,670 | py | Python | apartment/apartment/urls.py | Hardy7/CS465_Final | a38ccc1ebd988ed5ded9acc05ad6c8dd1cc2d1bd | [
"MIT"
] | 1 | 2020-03-15T22:45:42.000Z | 2020-03-15T22:45:42.000Z | apartment/apartment/urls.py | Hardy7/CS465_Final | a38ccc1ebd988ed5ded9acc05ad6c8dd1cc2d1bd | [
"MIT"
] | null | null | null | apartment/apartment/urls.py | Hardy7/CS465_Final | a38ccc1ebd988ed5ded9acc05ad6c8dd1cc2d1bd | [
"MIT"
] | null | null | null | """apartment URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.urls import path
from django.conf.urls import re_path
from myapp.views import *
# URL routing table for the apartment-management app: each entry maps a
# request-path regex to the view that renders it.
urlpatterns = [
    re_path(r'^admin/', admin.site.urls),  # built-in Django admin site
    re_path('^users/', include('users.urls', namespace = "users")),  # auth/user routes
    re_path('^$', HomeView.as_view(), name = 'index'),  # site root
    re_path('^index/', HomeView.as_view(), name = 'index'),  # alias for the root page
    re_path('^student/', StudentListView.as_view(), name = 'student'),
    re_path('^room/', RoomListView.as_view(), name = 'room'),
    re_path('^electric_charge/', ElectricChargeView.as_view(), name = 'electric_charge'),
    re_path('^water_charge/', WaterChargeView.as_view(), name = 'water_charge'),
    re_path('^sanitation/', SanitationListView.as_view(), name = 'sanitation'),
    re_path('^device/', DeviceListView.as_view(), name = 'device'),
    re_path('^device_sent_record/', DeviceSentRecordListView.as_view(), name = 'device_sent_record'),
    re_path('^rule/', RuleListView.as_view(), name = 'rule'),
]
| 46.388889 | 101 | 0.694012 | from django.conf.urls import include, url
from django.contrib import admin
from django.urls import path
from django.conf.urls import re_path
from myapp.views import *
# Route table: path regexes mapped onto the apartment app's class-based views.
urlpatterns = [
    # Django's built-in admin interface.
    re_path(r'^admin/', admin.site.urls),
    # User management lives in its own URLconf under the "users" namespace.
    re_path(r'^users/', include('users.urls', namespace="users")),
    # Both the bare root and /index/ render the home view.
    re_path(r'^$', HomeView.as_view(), name='index'),
    re_path(r'^index/', HomeView.as_view(), name='index'),
    re_path(r'^student/', StudentListView.as_view(), name='student'),
    re_path(r'^room/', RoomListView.as_view(), name='room'),
    re_path(r'^electric_charge/', ElectricChargeView.as_view(), name='electric_charge'),
    re_path(r'^water_charge/', WaterChargeView.as_view(), name='water_charge'),
    re_path(r'^sanitation/', SanitationListView.as_view(), name='sanitation'),
    re_path(r'^device/', DeviceListView.as_view(), name='device'),
    re_path(r'^device_sent_record/', DeviceSentRecordListView.as_view(), name='device_sent_record'),
    re_path(r'^rule/', RuleListView.as_view(), name='rule'),
]
| true | true |
f71edf8ec073efffa34dabc63868cec27920ed6d | 43,616 | py | Python | instructor/day_one.py | deniederhut/workshop_pyintensive | f8f494081c6daabeae0724aa058c2b80fe42878b | [
"BSD-2-Clause"
] | 1 | 2016-10-04T00:04:56.000Z | 2016-10-04T00:04:56.000Z | instructor/day_one.py | deniederhut/workshop_pyintensive | f8f494081c6daabeae0724aa058c2b80fe42878b | [
"BSD-2-Clause"
] | 8 | 2015-12-26T05:49:39.000Z | 2016-05-26T00:10:57.000Z | instructor/day_one.py | deniederhut/workshop_pyintensive | f8f494081c6daabeae0724aa058c2b80fe42878b | [
"BSD-2-Clause"
] | null | null | null |
# coding: utf-8
# # Day One
# ## Table of Contents
# 1. [Data Model](#Data-Model)
# 2. [Data Structures](#Data-Structures)
# 3. [Control Flow](#Control-Flow)
# 4. [Input and Output](#Input-and-Output)
# 5. [`os`](#os)
# 6. [`glob`](#glob)
# 7. [`subprocess`](#subprocess)
#
# Links to documentation will be provided at the beginning and end of each section. Look for: **DOCS**.
# In today's workshop, we'll learn how to combine data types into structures and how to use them for specific purposes. We will also cover looping and interacting with operating systems. Let's get started.
# ## Data Model
# [**DOCS**](https://docs.python.org/3/reference/datamodel.html)
#
# >Objects are Python’s abstraction for data. All data in a Python program is represented by objects or by relations between objects.
#
# Every object in Python has a **type**, a **value**, and an **identity**. We've already seen several data types, such as `int`, `float`, and `str`. An object's type determines its supported operations as well as the possible values it can take.
#
# In some cases, an object's value can change. We call these type of objects *mutable*. Objects whose values cannot be changed are known as *immutable*. The object type determines its mutability. Numbers and strings, for example, are immutable; lists and dictionaries, which we'll cover shortly, are mutable.
#
# To make this concrete, let's describe what an object's identity is. This can be thought of as an object's address in memory. Specifically, it's the memory address for the *value* of the object. Once an object has been created, it's identity never changes.
# In[1]:
x = 'hello'
# In[2]:
hex(id(x))
# The variable `x`'s identity or memory address is `___________` (represented as a hexadecimal string). Note that the memory addresses will be different each time this code is run.
#
# What happens if we create a new variable, `y`, and set it equal to `x`?
# In[3]:
y = x
# In[4]:
hex(id(y))
# In[5]:
hex(id(x))
# The address in memory is the same because both variables *point* to (or reference) the same *value*.
#
# Now, let's make `x` take on some other value.
# In[6]:
x = 'goodbye'
# In[7]:
hex(id(x))
# Now, the address *is* different.
#
# Let's see what happens if we set `x` to equal `'hello'` once more.
# In[8]:
x = 'hello'
# In[9]:
hex(id(x))
# `x` is once again pointing to the memory address associated with `'hello'`.
#
# What does this have to do with mutability? It seems as though we were actually able to change `x`'s value. To answer this, we'll show an example using a mutable object—a list in this case.
# In[10]:
a = [1, 2, 3]
# In[11]:
hex(id(a))
# In[12]:
a.append(4)
a
# In[13]:
hex(id(a))
# Notice what happened. We added `4` to the list, but the memory address *did not* change. This is what is means to be mutable. The value in memory address `0x107f26608` was originally `[1, 2, 3]`, but is now `[1, 2, 3, 4]`. The address in memory for this object's value will never change.
# In[14]:
a.append('#python')
a
# In[15]:
hex(id(a))
# Now let's see what happens when we assign our list `a` to a new variable `b`.
# In[16]:
b = a
# In[17]:
b
# In[18]:
hex(id(b))
# That makes sense. `a` and `b` both reference the same object—`[1, 2, 3, 4, '#python']`.
#
# >Assignment statements in Python do not copy objects, they create bindings between a target and an object.
#
# If we modify `b`, what will happen to `a`?
# In[19]:
b[-1] = 'Python'
# In[20]:
b
# In[21]:
a
# In[22]:
hex(id(a)) == hex(id(b))
# The changes made to `b` have affected `a` because they both point to the same data. It's possible that this behavior is unwanted. As a solution, we can make a copy of the object so that modifying one does not affect the other. To do so, we can use the built-in `copy` module.
# In[23]:
import copy
# In[24]:
c = copy.copy(a)
# This is referred to as making a *shallow* copy. While the values in `a` and `c` are the same, their respective memory addresses are different.
# In[25]:
hex(id(a)) == hex(id(c))
# A shallow copy creates a new container (a list in this case)—which is why the addresses in memory are different—with *references* to the *contents* of the original object.
# In[26]:
hex(id(a[-1]))
# In[27]:
hex(id(c[-1]))
# The addresses in memory for the individual elements are the same for both lists. Because we've made a copy, though, we can now modify one list without affecting the other.
# In[28]:
c[-1] = 'PYTHON'
# In[29]:
c
# In[30]:
a
# What if we were dealing with nested mutable? For this, we'll use a dictionary.
# In[31]:
d0 = {'key' : {'nested' : 'thing'}}
d1 = copy.copy(d0)
# In[32]:
d1
# In[33]:
d1['key']['nested'] = 'dict'
# In[34]:
d0 == d1
# In[35]:
d0
# Our intention was to change `d1`, but `d0` was also changed. This is because shallow copies reference contents—they don't copy them. For this, the `copy` module provides the `deepcopy()` function. Let's try that again.
# In[36]:
d0 = {'key' : {'nested' : 'thing'}}
d1 = copy.deepcopy(d0)
d1['key']['nested'] = 'dict'
# In[37]:
d0 == d1
# In[38]:
d0
# In[39]:
d1
# Now that we've learned about mutability and copying objects, let's dive into data structures.
#
# Data model [**DOCS**](https://docs.python.org/3/reference/datamodel.html)
# ## Data Structures
# [**DOCS**](https://docs.python.org/3.1/tutorial/datastructures.html)
#
# A data structure can be thought of as a "container" for storing data that includes functions, called "methods," that are used to access and manipulate that data. Python has several built-in data structures.
# ### Basics
# #### Lists
# A list is a sequence of values. The values are called elements (or items) and can be of any type—integer, float, string, boolean, etc.
#
# As a simple example, consider the following list.
# In[40]:
[1, 2, 3]
# Notice how the list was constructed. We used square brackets around the list elements.
#
# Let's look at a few more examples.
# In[41]:
[1.0, 8.0, 6.8]
# In[42]:
['this', 'is', 'also', 'a', 'valid', 'list']
# In[43]:
[True, False, True]
# It's also fine to have a list with different element types.
# In[44]:
[1, 2.0, 'three']
# Lists can even be nested—which means you can have lists within lists.
# In[45]:
[350, 'barrows', 'hall', ['berkeley', 'CA']]
# This nesting can be arbitrarily deep, but it's not usually a good idea as it can get confusing. For example, it may be difficult to access specific items for an object like:
#
# ```python
# [[[1, 2], [3, 4, [5, 6]]], [7, 8, 9]]
# ```
#
# Speaking of accessing elements, let's describe how to do that. We'll first create a new list and assign it to a variable called `first_list`.
# In[46]:
first_list = [9, 8, 7.0, 6, 5.4]
# To access list elements, we use the square bracket notation. For example, if we're interested in the middle element—the "two-eth" element—we use the following.
# In[47]:
first_list[2]
# This is called indexing and the value inside of the brackets must be an integer. (Recall that indices in Python start at `0`.) A list can be thought of mapping (or correspondence) between indices and elements.
#
# Let's say you're interested in the *last* element of this list. How could you do that? If you know the length of the list, you could access it using something like:
#
# ```python
# first_list[len(first_list) - 1]
# ```
#
# Why is the `-1` needed?
#
# There is an easier way. Python provides negative indices that let you access elements from "back-to-front."
# In[48]:
first_list[-1]
# With this notation, the last element is accessed with `-1` (because `-0 == 0`). Use `-2` to access the second-to-last item, `-3` to access the third-to-last element, and so on.
#
# We can also use the slice operator on lists to access multiple elements. The operator takes the following form: `[n:m]`. The first value before the colon (`:`) specifies the start position and the second value specifies the end position. The former is inclusive and the latter is exclusive. Let's take a look at what we mean.
#
# To motivate this, let's label the indices of our list.
#
# ```
# list: [9, 8, 7.0, 6, 5.4]
# index: [0, 1, 2, 3, 4]
# ```
#
# The code we'll submit is: `first_list[0:2]`. This tells Python to include values associated with position 0, position 1, but **not** for position 2.
# In[49]:
first_list[0:2]
# This is how Python has decided to make this operator work. This isn't intuitive, but thinking about it in the following way might help. If we consider the indices to be to the *left* of each item, we can think of the slice operator as accessing elements *between* those indices.
#
# If you try to access an item at an index that doesn't exist, Python will throw an `IndexError`:
# In[50]:
first_list[10]
# _from Raymond Hettinger_
#
# If, however, I try to access the same item with a slicing operation, e.g. `first_list[10:11]`, there is no error. Why?
# In[51]:
first_list[10:11]
# With lists, because they are mutable, we can modify elements.
# In[52]:
first_list[-1] = 5.43
# In[53]:
first_list
# #### Dictionaries
# A dictionary is a mapping from *keys* to *values*, where the keys, which must be unique, can be (almost) any type. A key and its associated value is referred to as a *key-value pair* or item. Dictionaries can be thought of as *unordered* key-value pairs.
#
# There are several ways to construct a dictionary. We can use braces (`{}`) or the built-in `dict()` function.
# In[54]:
{}
# In[55]:
dict()
# Of course, these are empty. Let's add comma separated key-value pairs to the first and use the assignment operator (`=`) for the second.
# In[56]:
{'one' : 1, 'two' : 2}
# In[57]:
dict(one=1, two=2)
# Keys and values are themselves separated by colons.
#
# Dictionaries are typically used for accessing values associated with keys. In the example above, we started to create a mapping between number words and their integer representations. Let's expand on this.
# In[58]:
nums = {'one' : 1, 'two' : 2, 'three' : 3, 'four' : 4, 'five' : 5, 'six' : 6}
# In[59]:
nums
# Notice that the key-value pairs are *not* in the order we specified when creating the dictionary. This isn't a problem, though, because we use the keys to look up the corresponding values. We do this using bracket notation, like we did with strings and lists.
# In[60]:
nums['five']
# If the key does not exist, you'll get an error.
# In[61]:
nums['seven']
# We can add the value for 'seven' by doing the following:
# In[62]:
nums['seven'] = 7
# In[63]:
nums
# We mentioned earlier that keys can be of almost any type. Values *can* be of any type and we can also mix types.
# In[64]:
mixed = {'one' : 1.0, 'UC Berkeley' : 'Cal', 350 : ['Barrows', 'Hall']}
# In[65]:
mixed
# In this example, we used string and integer keys. We could have actually used any *immutable* objects.
#
# Notice that we used a list as a value, which is valid. What if we tried using a list, which is mutable, as a key?
# In[66]:
{['this'] : 'will not work'}
# We get a `TypeError` saying that we can't use an unhashable type. What does this mean? In Python, dictionaries are implemented using hash tables. Hash tables use hash functions, which return integers given particular values (keys), to store and look up key-value pairs. For this to work, though, the keys have to be immutable, which means they can't be changed.
# #### Tuples
# A tuple is a sequence of values. The values, which are indexed by integers, can be of any type. This sounds a lot like lists, right?
#
# >Though tuples may seem similar to lists, they are often used in different situations and for different purposes. Tuples are immutable, and usually contain an heterogeneous sequence of elements.... Lists are mutable, and their elements are usually homogeneous....
#
# By convention, a tuple's comma-separated values are surrounded by parentheses.
# In[67]:
(1, 2, 3)
# Parentheses aren't necessary, though.
# In[68]:
t = 1, 2, 3
# In[69]:
type(t)
# The commas are what define the tuple. In fact, any set of multiple comma-separated objects *without* identifying symbols, such as brackets for lists, default to tuples.
#
# We can't create a tuple with a single element using the following syntax.
# In[70]:
type((1))
# We need to include a comma following the value.
# In[71]:
type((1,))
# The construction of `t`, above, is an example of *tuple packing*, where the values `1, 2, 3` are "packed" into a tuple.
#
# We can also perform the opposite operation, called *sequence unpacking*.
# In[72]:
a, b, c = t
# In[73]:
print(a, b, c)
# For this, the number of variables on the left must equal the number of elements in the sequence.
#
# This can be used with functions. In Python, functions can only return a single value. However, that value can be a tuple. In this case, you are effectively returning multiple values.
#
# Most list operators work on tuples. To access tuple elements, for example, we can use the bracket operator.
# In[74]:
t = ('a', 'b', 'c', 'd')
# In[75]:
t[0]
# We can also use the slice operator.
# In[76]:
t[1:3]
# Because tuples are immutable, we cannot modify tuple elements.
# In[77]:
t[0] = 'A'
# However, we can create a new tuple using existing tuples.
# In[78]:
t0 = 'A',
t1 = t[1:]
# In[79]:
t0 + t1
# #### Sets
# A set is an unordered collection of unique elements. Because sets are unordered, they do not keep track of element position or order of insertion. As a result, sets do not support indexing or slicing.
#
# >Basic uses include membership testing and eliminating duplicate entries. Set objects also support mathematical operations like union, intersection, difference, and symmetric difference.
#
# To construct a set, we can use braces (`{}`) or the built-in `set()` function.
# In[80]:
{3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5, 8, 9, 7, 9, 3}
# This returns the *unique* values passed in. In this case, the digits between 1-9, inclusive.
#
# Let's say we had the following list of fruits.
# In[81]:
basket = ['apple', 'orange', 'apple', 'pear', 'orange', 'banana']
# We can find the unique fruits by using the `set()` function.
# In[82]:
set(basket)
# Unlike other built-in Python data structures, sets support differencing.
# In[83]:
{1, 2, 3} - {2}
# In[84]:
{1, 2, 3} - {1, 2, 3}
# Sets are useful for finding unique values and for performing mathematical operations like the ones previously mentioned.
#
# Python also provides "specialized" container types in its `collections` module. These are alternatives — or, rather, complements — to Python's general-purpose, built-in containers that we've just covered: lists, dictionaries, tuples, and sets. For more information on these other data structures, see [the documentation](https://docs.python.org/3.5/library/collections.html#module-collections).
#
# In the following section, we'll explore several operators that the data structures covered above respond to.
# ### Operators
# There are several operators supported in Python. They are:
#
# * arithmetic
# * comparison (relational)
# * assignment
# * logical
# * bitwise
# * membership
# * identity
#
# We've already covered some of these either directly or in passing. We'll discuss how some of these operate on the data structures we've learned about thus far.
# #### Arithmetic
# The arithmetic operators are the ones you're probably most familiar with. These include `+`, `-`, `*`, `/`, and `**` to name a few. Of course, not all of these work on all Python data types.
#
# Previously, we saw how the `+` and `*` operators, which correspond to concatenation and repetition, operate on strings. It turns out that lists and tuples respond in similar ways.
# In[85]:
[1, 2, 3] + [4, 5, 6]
# In[86]:
(1, 2, 3) + (4, 5, 6)
# In[87]:
['Cal'] * 3
# In[88]:
('D-Lab',) * 3
# #### Comparison
# These type of operators "compare the values on either sides of them and decide the relation among them."
# In[89]:
[1, 2, 3] == [1, 2, 3]
# In[90]:
[0, 2, 3] == [1, 2, 3]
# >The comparison uses *lexicographical* ordering: first the first two items are compared, and **if they differ this determines the outcome of the comparison**; if they are equal, the next two items are compared, and so on, until either sequence is exhausted.
# In[91]:
[0, 2, 3] < [1, 2, 3]
# In the comparison above, because the `0` is less than the `1`, the result is `True`. Once this is determined, subsequent values are *not* compared. In the example below, the return value is `True` even though `20` is greater than `2`.
# In[92]:
[0, 20, 30] < [1, 2, 3]
# The behavior is the same with tuples.
# In[93]:
(0, 20, 30) < (1, 2, 3)
# In[94]:
(0, 1, 2) == (0, 1, 3)
# Interestingly, the behavior is slightly different with sets. Consider the list and set comparisons below.
# In[95]:
[0, 3, 4] < [1, 2, 9]
# In[96]:
set([0, 3, 4]) < set([1, 2, 9])
# With sets, `<` is not an element-by-element comparison: it tests whether the left-hand set is a proper subset of the right-hand set. Since `{0, 3, 4}` contains elements not in `{1, 2, 9}`, the result is `False`.
#
# Comparisons can be made with dictionaries, too.
# In[97]:
{'one' : 1} == {'one' : 1}
# But we can only check for equality.
# In[98]:
{'one' : 1} < {'one' : 1}
# #### Membership
# These operators test for membership—that is, whether the particular item exists—in a sequence.
# In[99]:
'D-Lab' in ['D-Lab', 'UC Berkeley']
# In[100]:
1 in (0, 1, 2)
# In[101]:
99 in {1868, 350, 102}
# For dictionaries, membership is tested against the keys.
# In[102]:
cities = {'Berkeley' : 'California',
'Miami' : 'Florida',
'New York' : 'New York',
'Seattle' : 'Washington'}
# In[103]:
'Berkeley' in cities
# The other membership operator is `not in`.
# In[104]:
99 not in {1868, 350, 102}
# #### Identity
# To compare the memory locations of objects, use identity operators.
# In[105]:
a = 'this'
b = 'this'
# In[106]:
a is b
# In this case, the memory address for both `a` and `b` is the same because they are pointing to the same value.
#
# Is this behavior consistent across data types?
# In[107]:
a = 1868
b = 1868
# In[108]:
a is b
# Apparently not.
# In[109]:
hex(id(a)), hex(id(b))
# What if we set `b` to equal `a`?
# In[110]:
b = a
# In[111]:
a is b
# Like with the membership operator, the complement to `is` is `is not`.
# In[112]:
a = 'this'
b = 'that'
# In[113]:
a is not b
# ### Functions and Methods
# We're familiar with functions, but what are methods?
#
# >A method is a function that "belongs to" an object.
#
# We have already seen string methods. For example, from day zero:
#
# ```python
# >>> my_string = 'Dav Clark wears a beret'
# >>> my_string = my_string.replace('beret', 'speedo')
# >>> print(my_string)
# Dav Clark wears a speedo
# ```
#
# Here, `.replace()` is the method.
#
# Python data structures have methods, too.
# #### Lists
# Let's use `first_list`, which we created above, to demonstrate some list functions and methods.
#
# Let's say we wanted to know how many elements are in `first_list`. For this, we would use the `len()` function.
# In[114]:
len(first_list)
# What about the largest and smallest values
# In[115]:
max(first_list), min(first_list)
# Let's say we wanted to add an element to `first_list`. For this, we can use the `.append()` method.
# In[116]:
first_list.append(2)
# Notice that methods are called using dot notation on the object we'd like to modify.
#
# By default, `.append()` adds an element to the *end* of a given list.
# In[117]:
first_list
# Notice how we invoked this method. We did not use an assignment operator (e.g., `x = x.append(y)`). This is because—and this is important—list methods are all void, which means that they *modify* lists and return `None`.
#
# Sometimes when we're adding elements to a list, we may with to insert it in a given position. For this, we can use the `.insert()` method. It takes two arguments—the first is the *position* and the second is the *value*. Let's say we wanted to add an item to the front of the list. We could do it using:
# In[118]:
first_list.insert(0, 10)
# In[119]:
first_list
# Let's append another value to the list.
# In[120]:
first_list.append(2)
# Now, let's count how many times the value `2` appears.
# In[121]:
first_list.count(2)
# In[122]:
first_list
# Let's say we wanted to remove one of the `2`s.
# In[123]:
first_list.remove(2)
# In[124]:
first_list
# The remove method removes the *first* item in the list that matches the value in the parentheses.
#
# In some cases, we might want to know the index value for a certain list element. We can use `.index()` for this.
# In[125]:
first_list.index(5.43)
# The value `5.43` can be found at index `5`.
#
# More information on list methods can be found [here](https://docs.python.org/3.5/tutorial/datastructures.html#more-on-lists).
# #### Dictionaries
# Let's use our `nums` dictionary to demonstrate some `dict` methods.
# In[126]:
nums
# The `len()` function we saw above also works on dictionaries. It returns the number of items in the object.
# In[127]:
len(nums)
# We might be interested in getting a list of the keys in `nums`. The `.keys()` method returns a list with this information.
# In[128]:
nums.keys()
# We can do the same for values.
# In[129]:
nums.values()
# To add to the dictionary, we can use the `.update()` method.
# In[130]:
nums.update(eight=8)
# In[131]:
nums
# Notice that we don't use quotation marks around the key name `eight`.
#
# If we'd like to remove an item, we can use the `.pop()` method. This removes the item—the key-value pair—*and* returns the value.
# In[132]:
nums.pop('one')
# In[133]:
nums
# We've successfully removed `{'one' : 1}` from `nums`.
# #### Tuples
# Tuples have no methods.
# #### Sets
# There are several set methods. They can be used for updating set objects or for performing mathematical operations. For example, we can add an element to set `s`.
# In[134]:
s = {1, 8, 6, 8}
# In[135]:
s
# In[136]:
s.add(0)
# In[137]:
s
# We can also remove set elements.
# In[138]:
s.remove(1)
# In[139]:
s
# Python supports several mathematical operations on sets. We can check the intersection—or overlap—of two sets, for example.
# In[140]:
{1, 2, 3} & {3, 4, 5} # or {1, 2, 3}.intersection({3, 4, 5})
# Another common set operation is the union, which basically combines sets.
# In[141]:
{0, 1} | {1, 2} # or {0, 1}.union({1, 2})
# Above, we saw that `{1, 2, 3} - {2}` resulted in `{1, 3}`. However, if the second set had more values in it, those values would not be represented in the final set. Python sets allow you to calculate the symmetric difference:
# In[142]:
{1, 2, 3} ^ {3, 4, 5}
# Along with testing for supersets and subsets
# In[143]:
{1, 2, 3} > {2, }
# Data structures [**DOCS**](https://docs.python.org/3.1/tutorial/datastructures.html)
# ## Control Flow
# [**DOCS**](https://docs.python.org/3/tutorial/controlflow.html)
# ### `for`
# In Python, a `for` statement iterates over items in a sequence—such as strings, lists, and tuples—in the order that they appear. `for` loops have the following syntax.
#
# ```Python
# for item in sequence:
# do_something_with(item)
# ```
#
# > side note - for whatever reason, some students have a really hard time with for loop syntax. Emphasize that in `for x in sequence`, `x` is an arbitrary name so that you can refer to the object returned by the iterator while you are inside of the loop. You could also use `for dinosaur in sequence`, but this reduces readibility in your code
#
# The `sequence` object should be iterable. The `statement(s)` are executed once for each item in the sequence. This is referred to as traversing the sequence. The loop ends when there are no more elements in the sequence.
#
# Let's look at some examples.
# In[144]:
text_var = 'berkeley'
# In[145]:
for c in text_var:
print(c)
# With strings, the `for` statement iterates over each character. With lists (or tuples), on the other hand, each list element is iterated over.
# In[146]:
list_var = [350, 'Barrows', 'Hall']
# In[147]:
for e in list_var:
print(e)
# With dictionaries, `for` loops iterate over keys.
# In[148]:
for k in {'one' : 1, 'two' : 2, 'three' : 3}:
print(k, end=" ")
# If we'd like a loop that iterates a given number of times or over a sequence of numbers, we can use the `range` object.
# In[149]:
for v in range(4):
print(v, end=" ")
# ### `while`
# Another way to achieve this—to iterate a given number of times—is to use the `while` loop.
# In[150]:
n = 0
while n < 4:
print(n, end=" ")
n += 1
print('\ndone')
# In this example, we have to increment `n` with each iteration of the loop. The body statements in `while` loops repeatedly execute as long as the header condition evaluates to `True`. Once the loop ends, program control passes to the line immediately following the loop.
#
# With `while` loops, there are two possibilities to be aware of. First, it's possible that some `while` loops never execute. Using the code above, if the value of `n` is initially `4` or greater, only `'done'` will be printed.
# In[151]:
n = 4
while n < 4:
print(n, end=" ")
n += 1
print('\ndone')
# Above, because the condition evaluates to `False`, the loop body is skipped and the first statement after the `while` loop is executed.
#
# Second, some `while` loops may run indefinitely. This is referred to as an infinite loop and happens when the condition *never* evaluates to `False`. Here is an example.
#
# ```Python
# n = 4
# while n >= 4:
# print(n, end=" ")
# n += 1
# print('\ndone')
# ```
# ### `if`
# In many cases, it's useful to control the order in which statements or function calls are executed or evaluated. A control flow statement determines which path or paths in a program should be followed. Control flow statements, for example, can:
#
# * execute a set of statements if a condition or certain conditions are met
# * execute a set of statements `n` times until a condition or certain conditions are met
# * stop the execution of a program
#
# How can we achieve this? The most well-known statement type is the `if` statement.
# In[152]:
x = 0
# In[153]:
if x == 0:
print('x is zero')
# `if` statements make use of boolean expressions. If the expression (or set of expressions) evaluate to `True`, the indented statement gets executed. Otherwise, nothing happens.
# In[154]:
x = 1
# In[155]:
if x == 0:
print('x is zero')
# The code above is referred to as a clause. Clauses contain "headers" and "bodies." Clause headers begin with identifying keywords—in this case, `if`—include boolean expressions, and end with colons. The body is a group of indented statements controlled by the clause. This is also known as a "block."
#
# Compound statements are made up of one or more clauses. For example, there might be two possibilities in which case we use the `else` keyword. We can combine the above as follows.
# In[156]:
if x == 0:
print('x is zero')
else:
print('x is not zero')
# Notice that clause headers are at the same indentation level.
#
# When there are more than two possibilities, we can use what are called chained conditionals. For this, we use the `elif` keyword.
# In[157]:
if x == 0:
print('x is zero')
elif x < 0:
print('x is negative')
elif x > 0:
print('x is positive')
# Of course, the code above only works if `x` is numeric. Assuming this is the case, all possible values of `x` are listed. Because of this, we can change the last clause (`elif x > 0`) to `else`.
#
# There isn't a "right" way to do this. A good approach is to write it such that its easily readable for yourself and others.
#
# What if `x` is *not* numeric? With the code as is, we'll get a `TypeError`. So, let's generalize what we have and wrap it in a function.
# In[158]:
def x_is(x):
    """Print a one-line description of *x*.

    Strings are reported as 'x is str'; ints and floats are reported by
    sign ('x is zero' / 'x is negative' / 'x is positive'); anything else
    is reported as 'invalid x value'.

    Uses isinstance() (the idiomatic type check, per PEP 8) rather than
    `type(x) is ...`, so str/int/float subclasses are handled too.
    bool is explicitly excluded from the numeric branch to preserve the
    original behavior (`type(True) in [int, float]` was False), so
    x_is(True) still prints 'invalid x value'.
    """
    if isinstance(x, str):
        print('x is str')
    elif isinstance(x, (int, float)) and not isinstance(x, bool):
        # NaN satisfies none of these, so nothing is printed for it —
        # same as the original chained conditional.
        if x == 0:
            print('x is zero')
        elif x < 0:
            print('x is negative')
        elif x > 0:
            print('x is positive')
    else:
        print('invalid x value')
# Before we call our function, let's explain what's going on. Our function, as defined, is an example of a "nested conditional." We first perform a type check and, if `x` is numeric, there are another set of conditions which are checked.
# In[159]:
x_is('ucb')
# In[160]:
x_is(1)
# In[161]:
x_is(0)
# In[162]:
x_is([1, 2, 3])
# In[163]:
x_is(None)
# Control flow [**DOCS**](https://docs.python.org/3/tutorial/controlflow.html)
# ## Input and Output
# [**DOCS**](https://docs.python.org/3/tutorial/inputoutput.html#reading-and-writing-files)
#
# Interacting with data in files is a common task in Python. These can be plain text files, comma-delimited (CSV) files, or any other number of file formats.
#
# To open files, we can use the built-in `open()` function. There is a file named `lorem-ipsum.txt` in the `data/` directory that we'll use to learn about file input and output.
#
# The `open()` function is typically used with two arguments—the filename and the "mode." The mode describes how the file will be used. The default is `'r'`, which stands for "read only."
# In[164]:
f = open('../data/01_lorem-ipsum.txt', 'r')
# `f` is a file object. There are several methods we can use to interact with the file's contents.
#
# The `.read(size)` method reads the contents of the file object. The optional numeric argument, *`size`*, corresponds to the number of bytes that should be read. This is useful if the data file is large. If we omit `size`, the entire contents of the file will be read and returned.
# In[165]:
f.read()
# What happens if we try to call `f.read()` again? "If the end of the file has been reached, `f.read()` will return an empty string (`''`)." In this situation, the "cursor" is at the end of the file and has nothing more to read.
#
# Because we'd like to show a few other methods, we can return to the beginning of the file using the `.seek()` method, passing in `0` as the argument.
# In[166]:
f.seek(0)
# Let's say we wanted to read the file, line-by-line. We can accomplish this using the `.readline()` method. The end of a "line" is identified by the presence of a new line character, `\n`. You can see some in the text output above.
# In[167]:
f.readline()
# In[168]:
f.readline()
# And so on.
#
# If you ever need to know the file object's current position, use the `.tell()` method.
# In[169]:
f.tell()
# This represents the number of *bytes* from the beginning of the file.
#
# We can also loop over the file object. Let's return to the start of the file first.
# In[170]:
f.seek(0)
# In[171]:
for line in f:
print(line)
# When we're done interacting with a file, that file should always be closed.
# In[172]:
f.close()
# We can always check whether a file is closed by using the following.
# In[173]:
f.closed
# The `with` keyword in Python ensures that files are properly closed after its associated code is executed. This is true even if an exception is raised. Using the `with` keyword is recommended.
#
# Let's print each line on our document using this syntax.
# In[174]:
with open('../data/01_lorem-ipsum.txt', 'r') as f:
for line in f:
print(line)
# We can also check that the file was, indeed, closed.
# In[175]:
f.closed
# What about writing to a file? There are two primary modes we can use for this: `'w'` for writing only and `'a'` for appending to a file. If a file opened in `'w'` mode already exists, it will be overwritten. Opening a file in `'a'` mode simply allows lines to be added to the end of an existing file.
#
# Let's start by creating a new file.
# In[176]:
with open('first-write.txt', 'w') as f:
f.write('this is our first line\n')
f.write('this is our last line')
# Now, let's check the contents of the file.
# In[177]:
with open('first-write.txt', 'r') as f:
for line in f:
print(line)
# Note that while we've been using `f` to identify our file object, we can use any valid variable name.
#
# Now, let's append to our file.
# In[178]:
with open('first-write.txt', 'a') as append_file:
append_file.write('\nthis is the real last line')
# Notice that we add a new line character to the beginning of this third line.
# In[179]:
with open('first-write.txt') as infile:
for row in infile:
print(row)
# In the code above, we use `row` where we had previously used `line`. We did that to serve as a reminder that the variable names used are not special in any way. It is, however, always a good idea to use descriptive variable names that make reading the code more understandable. This is part of making code "readable." For a bit more on this, see [here](http://docs.python-guide.org/en/latest/writing/style/), [here](https://www.python.org/dev/peps/pep-0008/), and [here](https://github.com/amontalenti/elements-of-python-style).
#
# The `open()` function can take a variety of file types. We've seen examples of how to use this with a `.txt` file.
#
# The CSV (comma separated values) format is "the most common import and export format for spreadsheets and databases."
#
# >[A] comma-separated values (CSV) file stores tabular data (numbers and text) in plain text. Each line of the file is a data record. Each record consists of one or more fields, separated by commas.
#
# We can open comma-delimited CSV files with `open()`, too. Let's open an example CSV file in `data/` called `roster.csv`.
# In[180]:
with open('../data/01_roster.csv', 'r') as roster:
for student_data in roster:
print(student_data)
# This file includes some made-up student information—a four-digit ID number, academic status, and demographic data.
#
# In some cases—say, if we need to calculate the average age of these students—we don't actually want to iterate over the first row, which is often called the "header."
# In[181]:
with open('../data/01_roster.csv', 'r') as roster:
next(roster)
for student_data in roster:
print(student_data)
# We do this using the `next()` function, which just goes to the next line. In this case, since we're starting at the top of the file, it goes to the second line.
#
# Now, let's say we wanted to create a list of the six student ages. How might we go about doing that? One approach might be to split each line on commas to extract the age. This would work except for the fact that student `2109`'s department *includes* a comma in the value.
#
# To help with situations like these, Python has a built-in `csv` module which includes lots of functionality for working with these types of files. Let's show how we could use this to calculate the average age of the students.
# In[182]:
import csv
# In[183]:
# Collect every student's age from the roster CSV into a list of ints.
ages = []
with open('../data/01_roster.csv', 'r') as f:
    next(f)  # skip the header row
    # csv.reader handles quoted fields, so a department value containing
    # a comma (e.g. student 2109's) is still parsed as a single column.
    roster = csv.reader(f, delimiter=',', quotechar='"')
    for student_data in roster:
        # column index 3 holds the age as a string; cast to int before storing
        ages.append(int(student_data[3]))
# The `reader()` function allows us to specify the delimiter and the quote character. The quote character, in this case, is the quotation mark (`"`). CSV files often wrap string values in quotes (or other characters) if they include the delimiter within them. The `reader()` function parses each line as a list of strings, taking into consideration the delimiter and quote character. This is why we can select the third element in `student_data` and why we change (or cast) the type to `int`. As we iterate over each line, we add the age value to `ages`.
#
# Now, we can create a new variable that holds the ages and calculate the average.
# In[184]:
ages_mean = sum(ages) / len(ages)
# In[185]:
print('The average age of students in the roster is: %.2f' % ages_mean)
# The `%.2f % ages_mean` simply instructs Python to print the value in `ages_mean` to two decimal places.
# Input output [**DOCS**](https://docs.python.org/3/tutorial/inputoutput.html#reading-and-writing-files)
# ## `os`
# [**DOCS**](https://docs.python.org/3/library/os.html)
#
# It is often useful and sometimes necessary to interact with the operating system. For example, we might be interested in modifying file paths or getting a list of files in a given directory. Python's built-in `os` module provides "operating system dependent functionality."
#
# To start, let's import `os`.
# In[186]:
import os
# Let's begin by listing our current working directory.
# In[187]:
os.getcwd()
# We know we have a `data/` directory in our repository, but we might not know its contents. We can get that information by using the following.
# In[188]:
os.listdir('../data/')
# This results in a list of the entries in the directory (excluding `.` and `..`). Notice that we're able to specify a *relative* path with `listdir()`.
#
# If we were writing a Python script that used one of these files, we might want to include checks for whether or not the files exist. We can also accomplish this with `os`. First, we can check if a directory exists.
# In[189]:
os.path.isdir('../data/')
# We can also check to see if a file exists.
# In[190]:
os.path.isfile('../data/01_roster.csv')
# Both of these return a Boolean value. One way these could be used is in conjunction with `if` statements. An alternative, the `os.path.exists()` function, checks for either files or directories.
#
# If a directory doesn't exist, we can create it from within Python. This is accomplished using the `mkdir()` function, which takes a file path as an argument.
# In[191]:
os.mkdir('newdir')
# Let's check the contents of the current directory.
# In[192]:
os.listdir()
# We can use the `rmdir()` function to remove `newdir/`.
# In[193]:
os.rmdir('newdir')
# For more information on the available functions, see [the documentation](https://docs.python.org/3/library/os.html#process-parameters).
#
# `os` [**DOCS**](https://docs.python.org/3/library/os.html)
# ## `glob`
# [**DOCS**](https://docs.python.org/3/library/glob.html)
#
# It's sometimes necessary to find file or pathnames matching a particular pattern. Python's built-in `glob` module uses Unix shell-style wildcards for pattern matching. Note that these are different from regular expressions.
#
# There is no shell variable (e.g., `$PATH`) or tilde (`~`, which typically refers to the "home" directory) expansion in `glob`. In addition, `glob` does not show hidden files—those that start with dots (`.`).
#
# Below we describe the behavior of the shell-style wildcards.
#
# Pattern | Meaning
# ------- | -------
# `*` | matches everything
# `?` | matches any single character
# `[seq]` | matches any character in seq
# `[!seq]` | matches any character not in seq
#
# Above, when we used `os.listdir()` in our current directory, the returned list included the Jupyter notebook files as well as a directory and the `.ipynb_checkpoints` file. Let's see what `glob` returns.
# In[194]:
import glob
# In[195]:
glob.glob('*')
# Notice that the list does not include `.ipynb_checkpoints`.
#
# Let's use `glob` to show only the `.ipynb` files.
# In[196]:
glob.glob('*.ipynb')
# If we want directories only.
# In[197]:
glob.glob('*/')
# The `*` matches zero or more characters.
#
# Let's create a few directories (and a file) to make this concrete.
# In[198]:
get_ipython().system('mkdir test')
get_ipython().system('mkdir test1')
get_ipython().system('mkdir test10')
get_ipython().system('mkdir test100')
get_ipython().system('touch test.txt')
# Note that the `!` before each line above allows us to run shell commands from within the notebook.
# In[199]:
glob.glob('test*')
# This returns any file or directory that begins with `'test'` and end with any (or no other) character.
#
# We can also match directories only.
# In[200]:
glob.glob('test*/')
# To match a single character, we can use the `?` wildcard character. This matches any character in the specified position of the name.
# In[201]:
glob.glob('test?')
# In this case, the only match is `test1`, which we know is a directory.
# Next, let's show what the character range (`[]`) wildcard can do. We'll create a few more directories (we'll clean this up when we're done).
# In[202]:
get_ipython().system('mkdir tset0')
get_ipython().system('mkdir tset1')
get_ipython().system('mkdir tset5')
get_ipython().system('mkdir tset10')
get_ipython().system('mkdir tset50')
# The character range wildcard matches a single character in the specified range.
# In[203]:
glob.glob('tset[0-1]')
# The code above matches files or directories that start with `tset` and that end with either `0` or `1`. If we were to have used `0-9` in the brackets, it would have also returned `'tset5'`.
#
# If we want the directories that end with *two* digits, we can do the following.
# In[204]:
glob.glob('tset[0-9][0-9]')
# The character range wildcard also works on letters.
# In[205]:
glob.glob('t[a-z][a-z]t?')
# This matches files or directories that begin with a `'t'` and are followed by two letters, a `'t'`, and a single character.
#
# An alternative way of getting the same result is as follows.
# In[206]:
glob.glob('t??t?')
# This is because we don't have any files or directories with numbers in the second and third positions.
#
# Let's clean up our directory.
# In[207]:
get_ipython().system('rm -rf test*')
get_ipython().system('rm -rf tset*')
# `glob` [**DOCS**](https://docs.python.org/3/library/glob.html)
# ## `subprocess`
# [**DOCS**](https://docs.python.org/3/library/subprocess.html)
#
# >A running program is called a **process**.
#
# It contains code and its associated activity (or state). For example, this includes memory, lists of open files, etc.
#
# Programs, which are processes, can also create new processes. These are known as **subprocesses** and independently of the processes which created (or spawned) them. This means this new process can run at the same time as the original.
#
# Python's `subprocess` module provides an interface for creating and working with additional processes.
#
# When might we want to spawn new processes? One example is executing a Python script—much like you would from the command line—within Python. Although we know that we can use the `!` to run shell commands, this only works from within the notebook. So, let's use `subprocess` to execute a Python script in `scripts/` named `simple.py`.
# In[208]:
import subprocess
# In[209]:
subprocess.check_output(['python', '../scripts/simple.py'])
# This file prints `IOKN2K!` (and a new line character, `\n`), which is an abbreviation for, "it's okay not to know!"
#
# With `check_output()`, the command to be executed must be passed in as a list. Each argument of the command should be a separate list element. `check_output()` lets us execute an external command and collect its output.
#
# The `b''` prefix indicates that the returned value is a bytes type as opposed to a `str` type. If needed, we can convert this using the following.
# In[210]:
subprocess.check_output(['python', '../scripts/simple.py']).decode('utf-8')
# `subprocess` [**DOCS**](https://docs.python.org/3/library/subprocess.html)
| 24.824132 | 554 | 0.686216 |
ction for data. All data in a Python program is represented by objects or by relations between objects.
#
# Every object in Python has a **type**, a **value**, and an **identity**. We've already seen several data types, such as `int`, `float`, and `str`. An object's type determines its supported operations as well as the possible values it can take.
#
# In some cases, an object's value can change. We call these type of objects *mutable*. Objects whose values cannot be changed are known as *immutable*. The object type determines its mutability. Numbers and strings, for example, are immutable; lists and dictionaries, which we'll cover shortly, are mutable.
#
# To make this concrete, let's describe what an object's identity is. This can be thought of as an object's address in memory. Specifically, it's the memory address for the *value* of the object. Once an object has been created, it's identity never changes.
x = 'hello'
hex(id(x))
#
# What happens if we create a new variable, `y`, and set it equal to `x`?
# In[3]:
y = x
# In[4]:
hex(id(y))
# In[5]:
hex(id(x))
# The address in memory is the same because both variables *point* to (or reference) the same *value*.
#
# Now, let's make `x` take on some other value.
x = 'goodbye'
hex(id(x))
# In[8]:
x = 'hello'
# In[9]:
hex(id(x))
# `x` is once again pointing to the memory address associated with `'hello'`.
#
# What does this have to do with mutability? It seems as though we were actually able to change `x`'s value. To answer this, we'll show an example using a mutable object—a list in this case.
# In[10]:
a = [1, 2, 3]
# In[11]:
hex(id(a))
# In[12]:
a.append(4)
a
# In[13]:
hex(id(a))
# Notice what happened. We added `4` to the list, but the memory address *did not* change. This is what it means to be mutable. The value in memory address `0x107f26608` was originally `[1, 2, 3]`, but is now `[1, 2, 3, 4]`. The address in memory for this object's value will never change.
a.append('#python')
a
hex(id(a))
# In[16]:
b = a
# In[17]:
b
# In[18]:
hex(id(b))
# That makes sense. `a` and `b` both reference the same object—`[1, 2, 3, 4, '
#
# >Assignment statements in Python do not copy objects, they create bindings between a target and an object.
#
# If we modify `b`, what will happen to `a`?
# In[19]:
b[-1] = 'Python'
# In[20]:
b
# In[21]:
a
# In[22]:
hex(id(a)) == hex(id(b))
# The changes made to `b` have affected `a` because they both point to the same data. It's possible that this behavior is unwanted. As a solution, we can make a copy of the object so that modifying one does not affect the other. To do so, we can use the built-in `copy` module.
import copy
c = copy.copy(a)
hex(id(a)) == hex(id(c))
hex(id(a[-1]))
hex(id(c[-1]))
# In[28]:
c[-1] = 'PYTHON'
# In[29]:
c
# In[30]:
a
# What if we were dealing with nested mutable? For this, we'll use a dictionary.
d0 = {'key' : {'nested' : 'thing'}}
d1 = copy.copy(d0)
d1
d1['key']['nested'] = 'dict'
d0 == d1
d0
d0 = {'key' : {'nested' : 'thing'}}
d1 = copy.deepcopy(d0)
d1['key']['nested'] = 'dict'
d0 == d1
d0
d1
a', 'valid', 'list']
# In[43]:
[True, False, True]
# It's also fine to have a list with different element types.
[1, 2.0, 'three']
[350, 'barrows', 'hall', ['berkeley', 'CA']]
#
# ```python
# [[[1, 2], [3, 4, [5, 6]]], [7, 8, 9]]
# ```
#
# Speaking of accessing elements, let's describe how to do that. We'll first create a new list and assign it to a variable called `first_list`.
# In[46]:
first_list = [9, 8, 7.0, 6, 5.4]
# To access list elements, we use the square bracket notation. For example, if we're interested in the middle element—the "two-eth" element—we use the following.
first_list[2]
first_list[-1]
#
# To motivate this, let's label the indices of our list.
# In[49]:
first_list[0:2]
# This is how Python has decided to make this operator work. This isn't intuitive, but thinking about it in the following way might help. If we consider the indices to be to the *left* of each item, we can think of the slice operator as accessing elements *between* those indices.
# In[50]:
first_list[10]
# _from Raymond Hettinger_
#
# If, however, I try to access the same item with a slicing operation, e.g. `first_list[10:11]`, there is no error. Why?
# In[51]:
first_list[10:11]
# With lists, because they are mutable, we can modify elements.
# In[52]:
first_list[-1] = 5.43
# In[53]:
first_list
# #### Dictionaries
# A dictionary is a mapping from *keys* to *values*, where the keys, which must be unique, can be (almost) any type. A key and its associated value is referred to as a *key-value pair* or item. Dictionaries can be thought of as *unordered* key-value pairs.
#
# There are several ways to construct a dictionary. We can use braces (`{}`) or the built-in `dict()` function.
# In[54]:
{}
# In[55]:
dict()
# Of course, these are empty. Let's add comma separated key-value pairs to the first and use the assignment operator (`=`) for the second.
{'one' : 1, 'two' : 2}
dict(one=1, two=2)
# In[58]:
nums = {'one' : 1, 'two' : 2, 'three' : 3, 'four' : 4, 'five' : 5, 'six' : 6}
# In[59]:
nums
# Notice that the key-value pairs are *not* in the order we specified when creating the dictionary. This isn't a problem, though, because we use the keys to look up the corresponding values. We do this using bracket notation, like we did with strings and lists.
nums['five']
# In[61]:
nums['seven']
# We can add the value for 'seven' by doing the following:
# In[62]:
nums['seven'] = 7
# In[63]:
nums
# We mentioned earlier that keys can be of almost any type. Values *can* be of any type and we can also mix types.
# In[64]:
mixed = {'one' : 1.0, 'UC Berkeley' : 'Cal', 350 : ['Barrows', 'Hall']}
# In[65]:
mixed
# In this example, we used string and integer keys. We could have actually used any *immutable* objects.
#
# Notice that we used a list as a value, which is valid. What if we tried using a list, which is mutable, as a key?
# In[66]:
{['this'] : 'will not work'}
# We get a `TypeError` saying that we can't use an unhashable type. What does this mean? In Python, dictionaries are implemented using hash tables. Hash tables use hash functions, which return integers given particular values (keys), to store and look up key-value pairs. For this to work, though, the keys have to be immutable, which means they can't be changed.
# #### Tuples
# A tuple is a sequence of values. The values, which are indexed by integers, can be of any type. This sounds a lot like lists, right?
#
# >Though tuples may seem similar to lists, they are often used in different situations and for different purposes. Tuples are immutable, and usually contain an heterogeneous sequence of elements.... Lists are mutable, and their elements are usually homogeneous....
#
# By convention, a tuple's comma-separated values are surrounded by parentheses.
(1, 2, 3)
# In[68]:
t = 1, 2, 3
# In[69]:
type(t)
# The commas are what define the tuple. In fact, any set of multiple comma-separated objects *without* identifying symbols, such as brackets for lists, default to tuples.
#
# We can't create a tuple with a single element using the following syntax.
type((1))
type((1,))
a, b, c = t
print(a, b, c)
t = ('a', 'b', 'c', 'd')
t[0]
t[1:3]
t[0] = 'A'
t0 = 'A',
t1 = t[1:]
t0 + t1
, 6, 5, 3, 5, 8, 9, 7, 9, 3}
# In[81]:
basket = ['apple', 'orange', 'apple', 'pear', 'orange', 'banana']
# We can find the unique fruits by using the `set()` function.
# In[82]:
set(basket)
# Unlike other built-in Python data structures, sets support differencing.
# In[83]:
{1, 2, 3} - {2}
# In[84]:
{1, 2, 3} - {1, 2, 3}
# Sets are useful for finding unique values and for performing mathematical operations like the ones previously mentioned.
#
# Python also provides "specialized" container types in its `collections` module. These are alternatives or, rather, complements, to Python's general-purpose, built-in containers that we've just covered‐lists, dictionaries, tuples, and sets. For more information on these other data structures, see [the documentation](https://docs.python.org/3.5/library/collections.html#module-collections).
#
# In the following section, we'll explore several operators that the data structures covered above respond to.
he arithmetic operators are the ones you're probably most familiar with. These include `+`, `-`, `*`, `/`, and `**` to name a few. Of course, not all of these work on all Python data types.
[1, 2, 3] + [4, 5, 6]
(1, 2, 3) + (4, 5, 6)
['Cal'] * 3
('D-Lab',) * 3
]
[0, 2, 3] < [1, 2, 3]
[0, 20, 30] < [1, 2, 3]
(0, 20, 30) < (1, 2, 3)
(0, 1, 2) == (0, 1, 3)
[0, 3, 4] < [1, 2, 9]
set([0, 3, 4]) < set([1, 2, 9])
{'one' : 1} == {'one' : 1}
{'one' : 1} < {'one' : 1}
1, 2)
99 in {1868, 350, 102}
cities = {'Berkeley' : 'California',
'Miami' : 'Florida',
'New York' : 'New York',
'Seattle' : 'Washington'}
'Berkeley' in cities
99 not in {1868, 350, 102}
a = 1868
b = 1868
a is b
hex(id(a)), hex(id(b))
b = a
a is b
a = 'this'
b = 'that'
a is not b
e already seen string methods. For example, from day zero:
#
# ```python
# >>> my_string = 'Dav Clark wears a beret'
# >>> my_string = my_string.replace('beret', 'speedo')
# >>> print(my_string)
# Dav Clark wears a speedo
# ```
#
# Here, `.replace()` is the method.
#
# Python data structures have methods, too.
# #### Lists
# Let's use `first_list`, which we created above, to demonstrate some list functions and methods.
# In[114]:
len(first_list)
# What about the largest and smallest values
# In[115]:
max(first_list), min(first_list)
# Let's say we wanted to add an element to `first_list`. For this, we can use the `.append()` method.
first_list.append(2)
#
# By default, `.append()` adds an element to the *end* of a given list.
# In[117]:
first_list
# Notice how we invoked this method. We did not use an assignment operator (e.g., `x = x.append(y)`). This is because—and this is important—list methods are all void, which means that they *modify* lists and return `None`.
#
# Sometimes when we're adding elements to a list, we may wish to insert it in a given position. For this, we can use the `.insert()` method. It takes two arguments—the first is the *position* and the second is the *value*. Let's say we wanted to add an item to the front of the list. We could do it using:
# In[118]:
first_list.insert(0, 10)
# In[119]:
first_list
# Let's append another value to the list.
first_list.append(2)
# In[121]:
first_list.count(2)
# In[122]:
first_list
# Let's say we wanted to remove one of the `2`s.
first_list.remove(2)
first_list
first_list.index(5.43)
tionaries. It returns the number of items in the object.
# In[127]:
len(nums)
# We might be interested in getting a list of the keys in `nums`. The `.keys()` method returns a list with this information.
# In[128]:
nums.keys()
# We can do the same for values.
# In[129]:
nums.values()
# To add to the dictionary, we can use the `.update()` method.
# In[130]:
nums.update(eight=8)
# In[131]:
nums
# Notice that we don't use quotation marks around the key name `eight`.
# In[132]:
nums.pop('one')
# In[133]:
nums
# We've successfully removed `{'one' : 1}` from `nums`.
)
s
{1, 2, 3} & {3, 4, 5}
{0, 1} | {1, 2}
{1, 2, 3} ^ {3, 4, 5}
{1, 2, 3} > {2, }
# In[145]:
for c in text_var:
print(c)
# With strings, the `for` statement iterates over each character. With lists (or tuples), on the other hand, each list element is iterated over.
# In[146]:
list_var = [350, 'Barrows', 'Hall']
# In[147]:
for e in list_var:
print(e)
# With dictionaries, `for` loops iterate over keys.
# In[148]:
for k in {'one' : 1, 'two' : 2, 'three' : 3}:
print(k, end=" ")
# If we'd like a loop that iterates a given number of times or over a sequence of numbers, we can use the `range` object.
for v in range(4):
print(v, end=" ")
rint(n, end=" ")
n += 1
print('\ndone')
# In[151]:
n = 4
while n < 4:
print(n, end=" ")
n += 1
print('\ndone')
# Above, because the condition evaluates to `False`, the loop body is skipped and the first statement after the `while` loop is executed.
#
# Second, some `while` loops may run indefinitely. This is referred to as an infinite loop and happens when the condition *never* evaluates to `False`. Here is an example.
#
# ```Python
# n = 4
# while n >= 4:
# print(n, end=" ")
# n += 1
# print('\ndone')
# ```
# ### `if`
# In many cases, it's useful to control the order in which statements or function calls are executed or evaluated. A control flow statement determines which path or paths in a program should be followed. Control flow statements, for example, can:
x = 0
if x == 0:
print('x is zero')
x = 1
if x == 0:
print('x is zero')
if x == 0:
print('x is zero')
else:
print('x is not zero')
if x == 0:
print('x is zero')
elif x < 0:
print('x is negative')
elif x > 0:
print('x is positive')
#
# What if `x` is *not* numeric? With the code as is, we'll get a `TypeError`. So, let's generalize what we have and wrap it in a function.
# In[158]:
def x_is(x):
    """Print a short description of x: 'x is str' for strings, the sign
    for ints and floats, and 'invalid x value' for anything else."""
    message = None
    if type(x) is str:
        message = 'x is str'
    elif type(x) in [int, float]:
        if x == 0:
            message = 'x is zero'
        elif x < 0:
            message = 'x is negative'
        elif x > 0:
            message = 'x is positive'
        # note: no final else here — a float NaN matches none of the
        # comparisons and produces no output, matching the original.
    else:
        message = 'invalid x value'
    if message is not None:
        print(message)
# Before we call our function, let's explain what's going on. Our function, as defined, is an example of a "nested conditional." We first perform a type check and, if `x` is numeric, there are another set of conditions which are checked.
# In[159]:
x_is('ucb')
# In[160]:
x_is(1)
# In[161]:
x_is(0)
# In[162]:
x_is([1, 2, 3])
# In[163]:
x_is(None)
# Control flow [**DOCS**](https://docs.python.org/3/tutorial/controlflow.html)
# ## Input and Output
# [**DOCS**](https://docs.python.org/3/tutorial/inputoutput.html#reading-and-writing-files)
#
# Interacting with data in files is a common task in Python. These can be plain text files, comma-delimited (CSV) files, or any other number of file formats.
#
# To open files, we can use the built-in `open()` function. There is a file named `lorem-ipsum.txt` in the `data/` directory that we'll use to learn about file input and output.
f = open('../data/01_lorem-ipsum.txt', 'r')
#
# The `.read(size)` method reads the contents of the file object. The optional numeric argument, *`size`*, corresponds to the number of bytes that should be read. This is useful if the data file is large. If we omit `size`, the entire contents of the file will be read and returned.
# In[165]:
f.read()
# What happens if we try to call `f.read()` again? "If the end of the file has been reached, `f.read()` will return an empty string (`''`)." In this situation, the "cursor" is at the end of the file and has nothing more to read.
#
# Because we'd like to show a few other methods, we can return to the beginning of the file using the `.seek()` method, passing in `0` as the argument.
f.seek(0)
# In[167]:
f.readline()
# In[168]:
f.readline()
# And so on.
#
# If you ever need to know the file object's current position, use the `.tell()` method.
f.tell()
# In[170]:
f.seek(0)
# In[171]:
for line in f:
print(line)
# When we're done interacting with a file, that file should always be closed.
f.close()
f.closed
# In[174]:
with open('../data/01_lorem-ipsum.txt', 'r') as f:
for line in f:
print(line)
# We can also check that the file was, indeed, closed.
# In[175]:
f.closed
# What about writing to a file? There are two primary modes we can use for this: `'w'` for writing only and `'a'` for appending to a file. If a file opened in `'w'` mode already exists, it will be overwritten. Opening a file in `'a'` mode simply allows lines to be added to the end of an existing file.
#
# Let's start by creating a new file.
with open('first-write.txt', 'w') as f:
f.write('this is our first line\n')
f.write('this is our last line')
# In[177]:
with open('first-write.txt', 'r') as f:
for line in f:
print(line)
# Note that while we've been using `f` to identify our file object, we can use any valid variable name.
# In[178]:
with open('first-write.txt', 'a') as append_file:
append_file.write('\nthis is the real last line')
# Notice that we add a new line character to the beginning of this third line.
# In[179]:
with open('first-write.txt') as infile:
for row in infile:
print(row)
# In the code above, we use `row` where we had previously used `line`. We did that to serve as a reminder that the variable names used are not special in any way. It is, however, always a good idea to use descriptive variable names that make reading the code more understandable. This is part of making code "readable." For a bit more on this, see [here](http://docs.python-guide.org/en/latest/writing/style/), [here](https://www.python.org/dev/peps/pep-0008/), and [here](https://github.com/amontalenti/elements-of-python-style).
#
# The `open()` function can take a variety of file types. We've seen examples of how to use this with a `.txt` file.
# In[180]:
with open('../data/01_roster.csv', 'r') as roster:
for student_data in roster:
print(student_data)
# This file includes some made-up student information—a four-digit ID number, academic status, and demographic data.
#
# In some cases—say, if we need to calculate the average age of these students—we don't actually want to iterate over the first row, which is often called the "header."
with open('../data/01_roster.csv', 'r') as roster:
next(roster)
for student_data in roster:
print(student_data)
#
# Now, let's say we wanted to create a list of the six student ages. How might we go about doing that? One approach might be to split each line on commas to extract the age. This would work except for the fact that student `2109`'s department *includes* a comma in the value.
#
# To help with situations like these, Python has a built-in `csv` module which includes lots of functionality for working with these types of types. Let's show how we could use this to calculate the average age of the students.
import csv
# Collect each student's age (4th CSV column) using a CSV parser that
# respects quoted fields, so commas inside values don't break the split.
ages = []
with open('../data/01_roster.csv', 'r') as f:
    next(f)  # skip the header row
    roster = csv.reader(f, delimiter=',', quotechar='"')
    for student_data in roster:
        ages.append(int(student_data[3]))
# The `reader()` function allows us to specify the delimiter and the quote character. The quote character, in this case, is the quotation mark (`"`). CSV files often wrap string values in quotes (or other characters) if they include the delimiter within them. The `reader()` function parses each line as a list of strings, taking into consideration the delimiter and quote character. This is why we can select the third element in `student_data` and why we change (or cast) the type to `int`. As we iterate over each line, we add the age value to `ages`.
ages_mean = sum(ages) / len(ages)
print('The average age of students in the roster is: %.2f' % ages_mean)
import os
# In[187]:
os.getcwd()
# We know we have a `data/` directory in our repository, but we might not know its contents. We can get that information by using the following.
# In[188]:
os.listdir('../data/')
# This results in a list of the entries in the directory (excluding `.` and `..`). Notice that we're able to specify a *relative* path with `listdir()`.
os.path.isdir('../data/')
os.path.isfile('../data/01_roster.csv')
# In[191]:
os.mkdir('newdir')
# Let's check the contents of the current directory.
os.listdir()
os.rmdir('newdir')
# Fix: this line was corrupted ("ort glob"); the following cells call
# glob.glob(), so the intended statement is the glob import.
import glob
# In[195]:
glob.glob('*')
# Notice that the list does not include `.ipynb_checkpoints`.
#
# Let's use `glob` to show only the `.ipynb` files.
glob.glob('*.ipynb')
glob.glob('*/')
# In[198]:
get_ipython().system('mkdir test')
get_ipython().system('mkdir test1')
get_ipython().system('mkdir test10')
get_ipython().system('mkdir test100')
get_ipython().system('touch test.txt')
# Note that the `!` before each line above allows us to run shell commands from within the notebook.
# In[199]:
glob.glob('test*')
# This returns any file or directory that begins with `'test'` and end with any (or no other) character.
#
# We can also match directories only.
# In[200]:
glob.glob('test*/')
# To match a single character, we can use the `?` wildcard character. This matches any character in the specified position of the name.
# In[201]:
glob.glob('test?')
# In this case, the only match is `test1`, which we know is a directory.
# Next, let's show what the character range (`[]`) wildcard can do. We'll create a few more directories (we'll clean this up when we're done).
# In[202]:
get_ipython().system('mkdir tset0')
get_ipython().system('mkdir tset1')
get_ipython().system('mkdir tset5')
get_ipython().system('mkdir tset10')
get_ipython().system('mkdir tset50')
# The character range wildcard matches a single character in the specified range.
# In[203]:
glob.glob('tset[0-1]')
# The code above matches files or directories that start with `tset` and that end with either `0` or `1`. If we were to have used `0-9` in the brackets, it would have also returned `'tset5'`.
#
# If we want the directories that end with *two* digits, we can do the following.
# In[204]:
glob.glob('tset[0-9][0-9]')
# The character range wildcard also works on letters.
# In[205]:
glob.glob('t[a-z][a-z]t?')
# This matches files or directories that begin with a `'t'` and are followed by two letters, a `'t'`, and a single character.
#
# An alternative way of getting the same result is as follows.
# In[206]:
glob.glob('t??t?')
# This is because we don't have any files or directories with numbers in the second and third positions.
# In[207]:
get_ipython().system('rm -rf test*')
get_ipython().system('rm -rf tset*')
# `glob` [**DOCS**](https://docs.python.org/3/library/glob.html)
# ## `subprocess`
# [**DOCS**](https://docs.python.org/3/library/subprocess.html)
#
# >A running program is called a **process**.
#
# It contains code and its associated activity (or state). For example, this includes memory, lists of open files, etc.
#
# Programs, which are processes, can also create new processes. These are known as **subprocesses** and independently of the processes which created (or spawned) them. This means this new process can run at the same time as the original.
#
# Python's `subprocess` module provides an interface for creating and working with additional processes.
# In[208]:
import subprocess
# In[209]:
subprocess.check_output(['python', '../scripts/simple.py'])
# This file prints `IOKN2K!` (and a new line character, `\n`), which is an abbreviation for "it's okay not to know!"
#
# With `check_output()`, the command to be executed must be passed in as a list. Each argument of the command should be a separate list element. `check_output()` lets us execute an external command and collect its output.
#
# The `b''` prefix indicates that the returned value is a bytes type as opposed to a `str` type. If needed, we can convert this using the following.
# In[210]:
subprocess.check_output(['python', '../scripts/simple.py']).decode('utf-8')
# `subprocess` [**DOCS**](https://docs.python.org/3/library/subprocess.html)
| true | true |
f71ee34b346861ec238e28be1637923444343508 | 763 | py | Python | test/basic/test_oplog.py | KentWangYQ/mongo2es | 8952640e8ac3f2b1aa6845082fce04b7c4f7bd1e | [
"Apache-2.0"
] | 5 | 2018-12-24T10:45:56.000Z | 2019-07-29T07:26:28.000Z | test/basic/test_oplog.py | KentWangYQ/mongo2es | 8952640e8ac3f2b1aa6845082fce04b7c4f7bd1e | [
"Apache-2.0"
] | null | null | null | test/basic/test_oplog.py | KentWangYQ/mongo2es | 8952640e8ac3f2b1aa6845082fce04b7c4f7bd1e | [
"Apache-2.0"
] | 2 | 2019-07-30T06:27:49.000Z | 2021-09-24T08:21:52.000Z | import datetime
# Manual smoke test: tail a MongoDB replica-set oplog and dispatch entries
# to per-operation handlers via the project's event emitter.
import bson
from common.mongo import oplog
from common import event_emitter
# HACK: credentials are hard-coded in the connection URI -- move them to
# configuration / environment variables before sharing this script.
# Start reading the oplog from the fixed timestamp below.
mo = oplog.MongoOplog('mongodb://eslocal:PHuance01@172.16.100.150,172.16.100.151,172.16.100.152/?replicaSet=foobar',
                      ts=bson.Timestamp(1524735047, 1))
# Each decorated function below presumably registers itself as a listener at
# decoration time (verify in common.event_emitter); reusing the name
# `on_data` is therefore harmless -- only the registered references matter.
@event_emitter.on(mo.event_emitter, 'data')
def on_data(data):
    # pass
    print(data)  # echo every raw oplog document
@event_emitter.on(mo.event_emitter, 'insert')
def on_data(data):
    pass
@event_emitter.on(mo.event_emitter, 'update')
def on_data(data):
    pass
@event_emitter.on(mo.event_emitter, 'delete')
def on_data(data):
    pass
@event_emitter.on(mo.event_emitter, 'cmd')
def on_data(data):
    pass
@event_emitter.on(mo.event_emitter, 'noop')
def on_data(data):
    # print(data)
    pass
mo.tail()  # presumably blocks, streaming oplog entries to the handlers above -- confirm
| 17.744186 | 116 | 0.70118 | import datetime
import bson
from common.mongo import oplog
from common import event_emitter
mo = oplog.MongoOplog('mongodb://eslocal:PHuance01@172.16.100.150,172.16.100.151,172.16.100.152/?replicaSet=foobar',
ts=bson.Timestamp(1524735047, 1))
@event_emitter.on(mo.event_emitter, 'data')
def on_data(data):
print(data)
@event_emitter.on(mo.event_emitter, 'insert')
def on_data(data):
pass
@event_emitter.on(mo.event_emitter, 'update')
def on_data(data):
pass
@event_emitter.on(mo.event_emitter, 'delete')
def on_data(data):
pass
@event_emitter.on(mo.event_emitter, 'cmd')
def on_data(data):
pass
@event_emitter.on(mo.event_emitter, 'noop')
def on_data(data):
pass
mo.tail()
| true | true |
f71ee3fea2be9796b955bd8e5429594c3c36ac84 | 886 | py | Python | tests/test_vrp_constructive.py | MichaelAllen1966/2004_covid_dialysis | 62c6842fc14acee07aee12ac2f238cbd1c3881d6 | [
"MIT"
] | null | null | null | tests/test_vrp_constructive.py | MichaelAllen1966/2004_covid_dialysis | 62c6842fc14acee07aee12ac2f238cbd1c3881d6 | [
"MIT"
] | 1 | 2020-05-15T11:11:53.000Z | 2020-05-15T11:11:53.000Z | tests/test_vrp_constructive.py | MichaelAllen1966/2004_covid_dialysis | 62c6842fc14acee07aee12ac2f238cbd1c3881d6 | [
"MIT"
] | null | null | null | '''
Tests for vrp package construction module
'''
import pytest
import vrp.constructive as cn
import vrp.io as io  # NOTE(review): `io` shadows the stdlib io module in this file
# Cases: (depot sector, sectors to serve, expected total cost).
# Each expected cost is a one-way distance doubled, matching the
# out-and-back trips described in the test's docstring.
@pytest.mark.parametrize("warehouse, selected_sectors, expected_cost",
                         [
                             ('L51', ['L51', 'L1', 'L10', 'L100','L101'], 196.1*2),
                             ('L51', ['L51', 'L1', 'L10'], 116.7*2),
                             ('L51', ['L51', 'L1', 'L102', 'L104','L11'], 239.6*2)
                         ]
                        )
def test_capacity_one_cost(warehouse, selected_sectors, expected_cost):
    '''
    Test single occupancy cost (warehouse->city_i, city_i<-warehouse)
    '''
    # Restrict the full travel-distance matrix to the sectors in this case.
    matrix = io.trim_matrix(io.load_travel_distance(), selected_sectors)
    cost = cn.single_capacity_cost(warehouse, matrix)
    #allowing for minor floating point difference
    assert pytest.approx(cost) == expected_cost
| 31.642857 | 80 | 0.563205 | import pytest
import vrp.constructive as cn
import vrp.io as io
@pytest.mark.parametrize("warehouse, selected_sectors, expected_cost",
[
('L51', ['L51', 'L1', 'L10', 'L100','L101'], 196.1*2),
('L51', ['L51', 'L1', 'L10'], 116.7*2),
('L51', ['L51', 'L1', 'L102', 'L104','L11'], 239.6*2)
]
)
def test_capacity_one_cost(warehouse, selected_sectors, expected_cost):
matrix = io.trim_matrix(io.load_travel_distance(), selected_sectors)
cost = cn.single_capacity_cost(warehouse, matrix)
assert pytest.approx(cost) == expected_cost
| true | true |
f71ee406bcdf8fe6a12c7bd165c46a5f00d242d8 | 609 | py | Python | ejemplo_barra_progreso.py | jlaica/ejercicios_tkinter | 4656c26b83957ee4742c4b28611ec048254dcc69 | [
"MIT"
] | null | null | null | ejemplo_barra_progreso.py | jlaica/ejercicios_tkinter | 4656c26b83957ee4742c4b28611ec048254dcc69 | [
"MIT"
] | null | null | null | ejemplo_barra_progreso.py | jlaica/ejercicios_tkinter | 4656c26b83957ee4742c4b28611ec048254dcc69 | [
"MIT"
] | null | null | null | import sys
# Demo: animate a determinate ttk progress bar from 1 % to 100 %.
from tkinter import *
from tkinter import ttk
import time

window = Tk()
window.geometry('300x200')
window.title('Retro Code 80s')

# Register a custom style so the filled part of the bar is drawn in black.
bar_style = ttk.Style()
bar_style.configure("black.Horizontal.TProgressbar", background='black')

progress = ttk.Progressbar(window, orient="horizontal", length=200,
                           style='black.Horizontal.TProgressbar', mode="determinate")
progress["maximum"] = 100
progress.place(x=50, y=80)

Label(window, text='Retro Code').place(x=120, y=50)

# Step the bar from 1 to 100, redrawing after each increment.
for step in range(1, 101):
    progress["value"] = step
    progress.update()
    time.sleep(0.5)
    # NOTE: a fresh Label is placed at the same spot on every pass, so the
    # percentage text is simply painted over by the next one.
    Label(window, text=f'{step} %').place(x=135, y=100)

window.mainloop()
| 23.423077 | 121 | 0.688013 | import sys
from tkinter import *
from tkinter import ttk
import time
myGui = Tk()
myGui.geometry('300x200')
myGui.title('Retro Code 80s')
style = ttk.Style()
style.configure("black.Horizontal.TProgressbar", background='black')
bar = ttk.Progressbar(myGui,orient ="horizontal",length = 200, style='black.Horizontal.TProgressbar',mode ="determinate")
bar["maximum"] = 100
bar.place(x=50,y=80)
Label(myGui, text = 'Retro Code').place(x=120,y=50)
for i in range(1,101,1):
bar["value"] = i
bar.update()
time.sleep(0.5)
Label(myGui, text = str(i)+' %').place(x=135,y=100)
myGui.mainloop()
| true | true |
f71ee4a4d67144c392ef4063c47fef4809302ba0 | 418 | py | Python | tests/test_local.py | ngoet/text-sent-api | eccfc2e65269350da0f60d5f014374433b4794b8 | [
"MIT"
] | null | null | null | tests/test_local.py | ngoet/text-sent-api | eccfc2e65269350da0f60d5f014374433b4794b8 | [
"MIT"
] | null | null | null | tests/test_local.py | ngoet/text-sent-api | eccfc2e65269350da0f60d5f014374433b4794b8 | [
"MIT"
] | null | null | null | """
Local endpoint tests.
Requires that the app is running locally (run `uvicorn main:app --reload`)
"""
import os
import pytest
from tests.endpoint_scenarios import SCENARIOS
from tests.endpoint_test_utils import run_endpoint_test
os.environ["LOCAL"] = "True"
@pytest.mark.parametrize("scenario", list(SCENARIOS.values()), ids=SCENARIOS.keys())
def test_eval(scenario):
run_endpoint_test(scenario)
| 20.9 | 84 | 0.753589 |
import os
import pytest
from tests.endpoint_scenarios import SCENARIOS
from tests.endpoint_test_utils import run_endpoint_test
os.environ["LOCAL"] = "True"
@pytest.mark.parametrize("scenario", list(SCENARIOS.values()), ids=SCENARIOS.keys())
def test_eval(scenario):
run_endpoint_test(scenario)
| true | true |
f71ee5485fea9a858d5cc8b330eb0e30bd658488 | 720 | py | Python | example/nonlinear.py | Cjkkkk/Pyflow | 22fd5b0141fac33ba19daa5217eee33df060a457 | [
"MIT"
] | 4 | 2019-12-18T02:05:15.000Z | 2021-09-18T09:45:22.000Z | example/nonlinear.py | Cjkkkk/Pyflow | 22fd5b0141fac33ba19daa5217eee33df060a457 | [
"MIT"
] | null | null | null | example/nonlinear.py | Cjkkkk/Pyflow | 22fd5b0141fac33ba19daa5217eee33df060a457 | [
"MIT"
] | null | null | null | import flow
# Fit a tiny MLP (custom `flow` autograd framework) to y = 3*x1^2 + 2*x2
# using SGD on one randomly sampled point per step.
from flow.module import Module, Linear
from flow.optim import SGD
from flow import function as F
import numpy as np  # NOTE(review): unused in this script
class Net(Module):
    """Tiny 2 -> 10 -> 1 MLP with a ReLU after the hidden layer."""
    def __init__(self):
        super().__init__()
        self.fc1 = Linear(2, 10)
        self.fc2 = Linear(10, 1)
    def forward(self, a):
        x = self.fc1(a)
        y = F.relu(x)
        z = self.fc2(y)
        return z
# Regression target: y = 3x_1^2 + 2x_2
model = Net()
optim = SGD(model.parameters(), lr = 0.005)
# Train for 100 stochastic steps (batch size 1, fresh sample each step).
for i in range(100):
    input = flow.randn((1, 2))  # NOTE(review): `input` shadows the builtin input()
    output = model(input)
    target = 3 * input[0, 0] * input[0, 0] + 2 * input[0, 1]
    loss = F.square_loss(output, target)
    loss.backward()
    optim.step()
    optim.zero_grad()
print("loss", loss) | 24 | 60 | 0.5875 | import flow
from flow.module import Module, Linear
from flow.optim import SGD
from flow import function as F
import numpy as np
class Net(Module):
def __init__(self):
super().__init__()
self.fc1 = Linear(2, 10)
self.fc2 = Linear(10, 1)
def forward(self, a):
x = self.fc1(a)
y = F.relu(x)
z = self.fc2(y)
return z
model = Net()
optim = SGD(model.parameters(), lr = 0.005)
for i in range(100):
input = flow.randn((1, 2))
output = model(input)
target = 3 * input[0, 0] * input[0, 0] + 2 * input[0, 1]
loss = F.square_loss(output, target)
loss.backward()
optim.step()
optim.zero_grad()
print("loss", loss) | true | true |
f71ee7a978b5915f4678383805f77825d8dc0762 | 18,825 | py | Python | mindarmour/fuzz_testing/model_coverage_metrics.py | hboshnak/mindarmour | 0609a4eaea875a84667bed279add9305752880cc | [
"Apache-2.0"
] | null | null | null | mindarmour/fuzz_testing/model_coverage_metrics.py | hboshnak/mindarmour | 0609a4eaea875a84667bed279add9305752880cc | [
"Apache-2.0"
] | null | null | null | mindarmour/fuzz_testing/model_coverage_metrics.py | hboshnak/mindarmour | 0609a4eaea875a84667bed279add9305752880cc | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Model-Test Coverage Metrics.
"""
from abc import abstractmethod
from collections import defaultdict
import math
import numpy as np
from mindspore import Tensor
from mindspore import Model
from mindspore.train.summary.summary_record import _get_summary_tensor_data
from mindarmour.utils._check_param import check_model, check_numpy_param, check_int_positive, \
check_param_type, check_value_positive
from mindarmour.utils.logger import LogUtil
LOGGER = LogUtil.get_instance()
TAG = 'CoverageMetrics'
class CoverageMetrics:
    """
    The abstract base class for Neuron coverage classes calculating coverage metrics.

    As we all known, each neuron output of a network will have a output range after training (we call it original
    range), and test dataset is used to estimate the accuracy of the trained network. However, neurons' output
    distribution would be different with different test datasets. Therefore, similar to function fuzz, model fuzz means
    testing those neurons' outputs and estimating the proportion of original range that has emerged with test
    datasets.

    Reference: `DeepGauge: Multi-Granularity Testing Criteria for Deep Learning Systems
    <https://arxiv.org/abs/1803.07519>`_

    Args:
        model (Model): The pre-trained model which waiting for testing.
        incremental (bool): Metrics will be calculate in incremental way or not. Default: False.
        batch_size (int): The number of samples in a fuzz test batch. Default: 32.
    """

    def __init__(self, model, incremental=False, batch_size=32):
        self._model = check_model('model', model, Model)
        self.incremental = check_param_type('incremental', incremental, bool)
        self.batch_size = check_int_positive('batch_size', batch_size)
        # Maps layer name -> per-neuron boolean "activated" flags.
        self._activate_table = defaultdict(list)

    @abstractmethod
    def get_metrics(self, dataset):
        """
        Calculate coverage metrics of given dataset.

        Args:
            dataset (numpy.ndarray): Dataset used to calculate coverage metrics.

        Raises:
            NotImplementedError: It is an abstract method.
        """
        msg = 'The function get_metrics() is an abstract method in class `CoverageMetrics`, and should be' \
              ' implemented in child class.'
        LOGGER.error(TAG, msg)
        raise NotImplementedError(msg)

    def _init_neuron_activate_table(self, data):
        """
        Initialise the activate table of each neuron in the model with format:
        {'layer1': [n1, n2, n3, ..., nn], 'layer2': [n1, n2, n3, ..., nn], ...}

        Args:
            data (numpy.ndarray): Data used for initialising the activate table.

        Return:
            dict, return a activate_table.
        """
        self._model.predict(Tensor(data))
        layer_out = _get_summary_tensor_data()
        if not layer_out:
            msg = 'User must use TensorSummary() operation to specify the middle layer of the model participating in ' \
                  'the coverage calculation.'
            LOGGER.error(TAG, msg)
            raise ValueError(msg)
        activate_table = defaultdict()
        for layer, value in layer_out.items():
            # Fix: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24;
            # the builtin `bool` is the supported, equivalent dtype spelling.
            activate_table[layer] = np.zeros(value.shape[1], bool)
        return activate_table

    def _get_bounds(self, train_dataset):
        """
        Update the lower and upper boundaries of neurons' outputs.

        Args:
            train_dataset (numpy.ndarray): Training dataset used for determine the neurons' output boundaries.

        Return:
            - numpy.ndarray, upper bounds of neuron' outputs.

            - numpy.ndarray, lower bounds of neuron' outputs.
        """
        upper_bounds = defaultdict(list)
        lower_bounds = defaultdict(list)
        batches = math.ceil(train_dataset.shape[0] / self.batch_size)
        for i in range(batches):
            inputs = train_dataset[i * self.batch_size: (i + 1) * self.batch_size]
            self._model.predict(Tensor(inputs))
            layer_out = _get_summary_tensor_data()
            for layer, tensor in layer_out.items():
                value = tensor.asnumpy()
                # Average away any trailing (spatial) axes so each neuron
                # contributes a single scalar per sample.
                value = np.mean(value, axis=tuple([i for i in range(2, len(value.shape))]))
                min_value = np.min(value, axis=0)
                max_value = np.max(value, axis=0)
                if np.size(upper_bounds[layer]):
                    # Fix: the previous `np.any(...)` check treated an
                    # all-zero bounds array as "layer not seen yet" and
                    # overwrote it; checking the array size distinguishes an
                    # unseen layer (empty list) from genuine zero bounds.
                    # np.maximum/np.minimum replace the equivalent
                    # mask-arithmetic running min/max.
                    upper_bounds[layer] = np.maximum(upper_bounds[layer], max_value)
                    lower_bounds[layer] = np.minimum(lower_bounds[layer], min_value)
                else:
                    upper_bounds[layer] = max_value
                    lower_bounds[layer] = min_value
        return upper_bounds, lower_bounds

    def _activate_rate(self):
        """
        Calculate the activate rate of neurons.
        """
        total_neurons = 0
        activated_neurons = 0
        for _, value in self._activate_table.items():
            activated_neurons += np.sum(value)
            total_neurons += len(value)
        activate_rate = activated_neurons / total_neurons
        return activate_rate
class NeuronCoverage(CoverageMetrics):
    """
    Neuron coverage metric: a neuron counts as activated whenever its
    (spatially averaged) output exceeds ``threshold`` on at least one test
    sample. The metric is the fraction of activated neurons over all
    monitored neurons in the network.

    Args:
        model (Model): The pre-trained model which waiting for testing.
        threshold (float): Threshold used to determined neurons is activated or not. Default: 0.1.
        incremental (bool): Metrics will be calculate in incremental way or not. Default: False.
        batch_size (int): The number of samples in a fuzz test batch. Default: 32.
    """

    def __init__(self, model, threshold=0.1, incremental=False, batch_size=32):
        super(NeuronCoverage, self).__init__(model, incremental, batch_size)
        threshold = check_param_type('threshold', threshold, float)
        self.threshold = check_value_positive('threshold', threshold)

    def get_metrics(self, dataset):
        """
        Compute neuron coverage: the proportion of neurons activated by the
        given dataset.

        Args:
            dataset (numpy.ndarray): Samples fed through the model.

        Returns:
            float, the metric of 'neuron coverage'.

        Examples:
            >>> nc = NeuronCoverage(model, threshold=0.1)
            >>> nc_metrics = nc.get_metrics(test_data)
        """
        dataset = check_numpy_param('dataset', dataset)
        if not self.incremental or not self._activate_table:
            self._activate_table = self._init_neuron_activate_table(dataset[0:1])
        batch_count = math.ceil(dataset.shape[0] / self.batch_size)
        for batch_idx in range(batch_count):
            batch = dataset[batch_idx * self.batch_size: (batch_idx + 1) * self.batch_size]
            self._model.predict(Tensor(batch))
            for layer, tensor in _get_summary_tensor_data().items():
                outputs = tensor.asnumpy()
                # Average out spatial axes so every neuron yields one value per sample.
                outputs = np.mean(outputs, axis=tuple(range(2, outputs.ndim)))
                fired = np.any(outputs > self.threshold, axis=0)
                self._activate_table[layer] = np.logical_or(self._activate_table[layer], fired)
        return self._activate_rate()
class TopKNeuronCoverage(CoverageMetrics):
    """
    Top-k neuron coverage: a neuron counts as activated when, for at least
    one sample, its output is among the k largest outputs of its hidden
    layer. The metric is the fraction of such neurons over all monitored
    neurons.

    Args:
        model (Model): The pre-trained model which waiting for testing.
        top_k (int): Neuron is activated when its output has the top k largest value in that hidden layers. Default: 3.
        incremental (bool): Metrics will be calculate in incremental way or not. Default: False.
        batch_size (int): The number of samples in a fuzz test batch. Default: 32.
    """

    def __init__(self, model, top_k=3, incremental=False, batch_size=32):
        super(TopKNeuronCoverage, self).__init__(model, incremental=incremental, batch_size=batch_size)
        self.top_k = check_int_positive('top_k', top_k)

    def get_metrics(self, dataset):
        """
        Compute the top-k activated neuron coverage of the given dataset.

        Args:
            dataset (numpy.ndarray): Samples fed through the model.

        Returns:
            float, the metrics of 'top k neuron coverage'.

        Examples:
            >>> tknc = TopKNeuronCoverage(model, top_k=3)
            >>> metrics = tknc.get_metrics(test_data)
        """
        dataset = check_numpy_param('dataset', dataset)
        if not self.incremental or not self._activate_table:
            self._activate_table = self._init_neuron_activate_table(dataset[0:1])
        batch_count = math.ceil(dataset.shape[0] / self.batch_size)
        for batch_idx in range(batch_count):
            batch = dataset[batch_idx * self.batch_size: (batch_idx + 1) * self.batch_size]
            self._model.predict(Tensor(batch))
            for layer, tensor in _get_summary_tensor_data().items():
                outputs = tensor.asnumpy()
                if outputs.ndim > 2:
                    outputs = np.mean(outputs, axis=tuple(range(2, outputs.ndim)))
                # Per-sample threshold: the k-th largest activation in the
                # layer, kept as a column so it broadcasts against `outputs`.
                kth_largest = np.sort(outputs)[:, -self.top_k].reshape(outputs.shape[0], 1)
                in_top_k = np.any(outputs - kth_largest >= 0, axis=0)
                self._activate_table[layer] = np.logical_or(self._activate_table[layer], in_top_k)
        return self._activate_rate()
class SuperNeuronActivateCoverage(CoverageMetrics):
    """
    Super neuron activation coverage (SNAC): :math:`SNAC = |UpperCornerNeuron|/|N|`.
    A neuron is counted when some test sample drives its output above the
    upper bound observed for that neuron on the training dataset.

    Args:
        model (Model): The pre-trained model which waiting for testing.
        train_dataset (numpy.ndarray): Training dataset used for determine the neurons' output boundaries.
        incremental (bool): Metrics will be calculate in incremental way or not. Default: False.
        batch_size (int): The number of samples in a fuzz test batch. Default: 32.
    """

    def __init__(self, model, train_dataset, incremental=False, batch_size=32):
        super(SuperNeuronActivateCoverage, self).__init__(model, incremental=incremental, batch_size=batch_size)
        train_dataset = check_numpy_param('train_dataset', train_dataset)
        # Profile per-neuron output ranges on the training data once, up front.
        self.upper_bounds, self.lower_bounds = self._get_bounds(train_dataset=train_dataset)

    def get_metrics(self, dataset):
        """
        Compute the 'strong neuron activation coverage' of the given dataset.

        Args:
            dataset (numpy.ndarray): Samples fed through the model.

        Returns:
            float, the metric of 'strong neuron activation coverage'.

        Examples:
            >>> snac = SuperNeuronActivateCoverage(model, train_dataset)
            >>> metrics = snac.get_metrics(test_data)
        """
        dataset = check_numpy_param('dataset', dataset)
        if not self.incremental or not self._activate_table:
            self._activate_table = self._init_neuron_activate_table(dataset[0:1])
        batch_count = math.ceil(dataset.shape[0] / self.batch_size)
        for batch_idx in range(batch_count):
            batch = dataset[batch_idx * self.batch_size: (batch_idx + 1) * self.batch_size]
            self._model.predict(Tensor(batch))
            for layer, tensor in _get_summary_tensor_data().items():
                outputs = tensor.asnumpy()
                if outputs.ndim > 2:
                    outputs = np.mean(outputs, axis=tuple(range(2, outputs.ndim)))
                above_upper = np.any(outputs > self.upper_bounds[layer], axis=0)
                self._activate_table[layer] = np.logical_or(self._activate_table[layer], above_upper)
        return self._activate_rate()
class NeuronBoundsCoverage(SuperNeuronActivateCoverage):
    """
    Neuron boundary coverage (NBC):
    :math:`NBC = (|UpperCornerNeuron| + |LowerCornerNeuron|)/(2*|N|)`, where
    :math:`|N|` is the number of neurons. A neuron is counted when some test
    sample drives its output outside the [lower, upper] range profiled on
    the training dataset.

    Args:
        model (Model): The pre-trained model which waiting for testing.
        train_dataset (numpy.ndarray): Training dataset used for determine the neurons' output boundaries.
        incremental (bool): Metrics will be calculate in incremental way or not. Default: False.
        batch_size (int): The number of samples in a fuzz test batch. Default: 32.
    """

    def __init__(self, model, train_dataset, incremental=False, batch_size=32):
        super(NeuronBoundsCoverage, self).__init__(model, train_dataset, incremental=incremental, batch_size=batch_size)

    def get_metrics(self, dataset):
        """
        Compute the neuron boundary coverage of the given dataset.

        Args:
            dataset (numpy.ndarray): Samples fed through the model.

        Returns:
            float, the metric of 'neuron boundary coverage'.

        Examples:
            >>> nbc = NeuronBoundsCoverage(model, train_dataset)
            >>> metrics = nbc.get_metrics(test_data)
        """
        dataset = check_numpy_param('dataset', dataset)
        if not self.incremental or not self._activate_table:
            self._activate_table = self._init_neuron_activate_table(dataset[0:1])
        batch_count = math.ceil(dataset.shape[0] / self.batch_size)
        for batch_idx in range(batch_count):
            batch = dataset[batch_idx * self.batch_size: (batch_idx + 1) * self.batch_size]
            self._model.predict(Tensor(batch))
            for layer, tensor in _get_summary_tensor_data().items():
                outputs = tensor.asnumpy()
                if outputs.ndim > 2:
                    outputs = np.mean(outputs, axis=tuple(range(2, outputs.ndim)))
                out_of_range = np.logical_or(outputs > self.upper_bounds[layer],
                                             outputs < self.lower_bounds[layer])
                escaped = np.any(out_of_range, axis=0)
                self._activate_table[layer] = np.logical_or(self._activate_table[layer], escaped)
        return self._activate_rate()
class KMultisectionNeuronCoverage(SuperNeuronActivateCoverage):
    """
    Get the metric of 'k-multisection neuron coverage'. KMNC measures how thoroughly the given set of test inputs
    covers the range of neurons output values derived from training dataset.

    Args:
        model (Model): The pre-trained model which waiting for testing.
        train_dataset (numpy.ndarray): Training dataset used for determine the neurons' output boundaries.
        segmented_num (int): The number of segmented sections of neurons' output intervals. Default: 100.
        incremental (bool): Metrics will be calculate in incremental way or not. Default: False.
        batch_size (int): The number of samples in a fuzz test batch. Default: 32.
    """

    def __init__(self, model, train_dataset, segmented_num=100, incremental=False, batch_size=32):
        super(KMultisectionNeuronCoverage, self).__init__(model, train_dataset, incremental=incremental,
                                                          batch_size=batch_size)
        self.segmented_num = check_int_positive('segmented_num', segmented_num)
        # Width of each of the `segmented_num` sections of every neuron's
        # [lower, upper] output interval, per layer.
        self.intervals = defaultdict(list)
        for keys in self.upper_bounds.keys():
            self.intervals[keys] = (self.upper_bounds[keys] - self.lower_bounds[keys]) / self.segmented_num

    def _init_k_multisection_table(self, data):
        """Initialise the per-neuron, per-section activate table."""
        self._model.predict(Tensor(data))
        layer_out = _get_summary_tensor_data()
        activate_section_table = defaultdict()
        for layer, value in layer_out.items():
            # Fix: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24;
            # the builtin `bool` is the supported, equivalent dtype spelling.
            activate_section_table[layer] = np.zeros((value.shape[1], self.segmented_num), bool)
        return activate_section_table

    def get_metrics(self, dataset):
        """
        Get the metric of 'k-multisection neuron coverage'.

        Args:
            dataset (numpy.ndarray): Dataset used to calculate coverage metrics.

        Returns:
            float, the metric of 'k-multisection neuron coverage'.

        Examples:
            >>> kmnc = KMultisectionNeuronCoverage(model, train_dataset, segmented_num=100)
            >>> metrics = kmnc.get_metrics(test_data)
        """
        dataset = check_numpy_param('dataset', dataset)
        if not self.incremental or not self._activate_table:
            self._activate_table = self._init_k_multisection_table(dataset[0:1])
        batches = math.ceil(dataset.shape[0] / self.batch_size)
        for i in range(batches):
            inputs = dataset[i * self.batch_size: (i + 1) * self.batch_size]
            self._model.predict(Tensor(inputs))
            layer_out = _get_summary_tensor_data()
            for layer, tensor in layer_out.items():
                value = tensor.asnumpy()
                value = np.mean(value, axis=tuple([i for i in range(2, len(value.shape))]))
                # NOTE(review): if a neuron's upper and lower bounds coincide,
                # its interval is 0 and this division yields inf/NaN -- confirm
                # the bounds are strictly ordered for all monitored layers.
                hits = np.floor((value - self.lower_bounds[layer]) / self.intervals[layer]).astype(int)
                hits = np.transpose(hits, [1, 0])
                for n in range(len(hits)):
                    # Mark each in-range section this neuron's outputs fell into;
                    # outputs outside the profiled range are ignored here (they
                    # belong to SNAC/NBC, not KMNC).
                    for sec in hits[n]:
                        if sec >= self.segmented_num or sec < 0:
                            continue
                        self._activate_table[layer][n][sec] = True
        kmnc = self._activate_rate() / self.segmented_num
        return kmnc
| 45.691748 | 120 | 0.65259 |
from abc import abstractmethod
from collections import defaultdict
import math
import numpy as np
from mindspore import Tensor
from mindspore import Model
from mindspore.train.summary.summary_record import _get_summary_tensor_data
from mindarmour.utils._check_param import check_model, check_numpy_param, check_int_positive, \
check_param_type, check_value_positive
from mindarmour.utils.logger import LogUtil
LOGGER = LogUtil.get_instance()
TAG = 'CoverageMetrics'
class CoverageMetrics:
    """Abstract base class for neuron-coverage metrics.

    Subclasses feed batches of inputs through ``model``, read the middle-layer
    activations recorded via ``TensorSummary``, and accumulate per-neuron
    activation flags in ``self._activate_table``.

    Args:
        model (Model): Model under test.
        incremental (bool): If True, ``get_metrics`` keeps accumulating into
            the existing activation table instead of starting fresh each call.
        batch_size (int): Number of samples fed to the model per prediction.
    """

    def __init__(self, model, incremental=False, batch_size=32):
        self._model = check_model('model', model, Model)
        self.incremental = check_param_type('incremental', incremental, bool)
        self.batch_size = check_int_positive('batch_size', batch_size)
        self._activate_table = defaultdict(list)

    @abstractmethod
    def get_metrics(self, dataset):
        """Compute the coverage value for ``dataset``; must be overridden."""
        msg = 'The function get_metrics() is an abstract method in class `CoverageMetrics`, and should be' \
              ' implemented in child class.'
        LOGGER.error(TAG, msg)
        raise NotImplementedError(msg)

    def _init_neuron_activate_table(self, data):
        """Build a fresh all-False activation table keyed by summary layer name.

        Raises:
            ValueError: If the model recorded no TensorSummary data.
        """
        self._model.predict(Tensor(data))
        layer_out = _get_summary_tensor_data()
        if not layer_out:
            msg = 'User must use TensorSummary() operation to specify the middle layer of the model participating in ' \
                  'the coverage calculation.'
            LOGGER.error(TAG, msg)
            raise ValueError(msg)
        activate_table = defaultdict()
        for layer, value in layer_out.items():
            # Builtin `bool` dtype: the `np.bool` alias is deprecated since
            # NumPy 1.20 and removed in NumPy 1.24.
            activate_table[layer] = np.zeros(value.shape[1], bool)
        return activate_table

    def _get_bounds(self, train_dataset):
        """Return per-neuron (upper, lower) activation bounds over ``train_dataset``."""
        upper_bounds = defaultdict(list)
        lower_bounds = defaultdict(list)
        batches = math.ceil(train_dataset.shape[0] / self.batch_size)
        for i in range(batches):
            inputs = train_dataset[i * self.batch_size: (i + 1) * self.batch_size]
            self._model.predict(Tensor(inputs))
            layer_out = _get_summary_tensor_data()
            for layer, tensor in layer_out.items():
                value = tensor.asnumpy()
                # Collapse spatial axes so each neuron (channel) yields one scalar.
                value = np.mean(value, axis=tuple([i for i in range(2, len(value.shape))]))
                min_value = np.min(value, axis=0)
                max_value = np.max(value, axis=0)
                if np.any(upper_bounds[layer]):
                    # Element-wise running max/min; equivalent to the original
                    # mask arithmetic (flag * old + (1 - flag) * new) but clearer.
                    # NOTE(review): `np.any` treats an all-zero first batch as
                    # "no bounds yet"; pre-existing behavior, kept as-is.
                    upper_bounds[layer] = np.maximum(upper_bounds[layer], max_value)
                    lower_bounds[layer] = np.minimum(lower_bounds[layer], min_value)
                else:
                    upper_bounds[layer] = max_value
                    lower_bounds[layer] = min_value
        return upper_bounds, lower_bounds

    def _activate_rate(self):
        """Return the fraction of activation-table entries currently marked True."""
        total_neurons = 0
        activated_neurons = 0
        for _, value in self._activate_table.items():
            activated_neurons += np.sum(value)
            total_neurons += len(value)
        activate_rate = activated_neurons / total_neurons
        return activate_rate
class NeuronCoverage(CoverageMetrics):
    """Neuron coverage: fraction of neurons whose mean activation exceeds
    ``threshold`` for at least one sample in the evaluated dataset."""

    def __init__(self, model, threshold=0.1, incremental=False, batch_size=32):
        super(NeuronCoverage, self).__init__(model, incremental, batch_size)
        checked_threshold = check_param_type('threshold', threshold, float)
        self.threshold = check_value_positive('threshold', checked_threshold)

    def get_metrics(self, dataset):
        """Return the neuron-coverage value of ``dataset``."""
        dataset = check_numpy_param('dataset', dataset)
        num_batches = math.ceil(dataset.shape[0] / self.batch_size)
        if not (self.incremental and self._activate_table):
            self._activate_table = self._init_neuron_activate_table(dataset[0:1])
        for batch_idx in range(num_batches):
            batch = dataset[batch_idx * self.batch_size: (batch_idx + 1) * self.batch_size]
            self._model.predict(Tensor(batch))
            for layer, tensor in _get_summary_tensor_data().items():
                out = tensor.asnumpy()
                # Average away any spatial axes so one scalar remains per neuron.
                out = np.mean(out, axis=tuple(range(2, out.ndim)))
                fired = np.sum(out > self.threshold, axis=0) > 0
                self._activate_table[layer] = np.logical_or(self._activate_table[layer], fired)
        return self._activate_rate()
class TopKNeuronCoverage(CoverageMetrics):
    """Top-k neuron coverage: a neuron is covered once it ranks among the
    ``top_k`` most-activated neurons of its layer for at least one sample.
    """

    def __init__(self, model, top_k=3, incremental=False, batch_size=32):
        super(TopKNeuronCoverage, self).__init__(model, incremental=incremental, batch_size=batch_size)
        self.top_k = check_int_positive('top_k', top_k)

    def get_metrics(self, dataset):
        """Return the top-k neuron-coverage value of ``dataset``."""
        dataset = check_numpy_param('dataset', dataset)
        batches = math.ceil(dataset.shape[0] / self.batch_size)
        if not self.incremental or not self._activate_table:
            self._activate_table = self._init_neuron_activate_table(dataset[0:1])
        for i in range(batches):
            inputs = dataset[i * self.batch_size: (i + 1) * self.batch_size]
            self._model.predict(Tensor(inputs))
            layer_out = _get_summary_tensor_data()
            for layer, tensor in layer_out.items():
                value = tensor.asnumpy()
                if len(value.shape) > 2:
                    # Collapse spatial axes: one scalar activation per neuron.
                    value = np.mean(value, axis=tuple([i for i in range(2, len(value.shape))]))
                # Per-sample k-th largest activation, kept as a column for broadcasting.
                # NOTE(review): assumes top_k <= number of neurons in the layer,
                # otherwise the negative index wraps around — confirm upstream checks.
                top_k_value = np.sort(value)[:, -self.top_k].reshape(value.shape[0], 1)
                # Neuron is "hit" if it reaches the per-sample top-k threshold for any sample.
                top_k_value = np.sum((value - top_k_value) >= 0, axis=0) > 0
                self._activate_table[layer] = np.logical_or(self._activate_table[layer], top_k_value)
        top_k_neuron_coverage = self._activate_rate()
        return top_k_neuron_coverage
class SuperNeuronActivateCoverage(CoverageMetrics):
    """Super-neuron activation coverage (SNAC): a neuron is covered once some
    test sample drives its mean activation above the upper bound observed on
    the training data."""

    def __init__(self, model, train_dataset, incremental=False, batch_size=32):
        super(SuperNeuronActivateCoverage, self).__init__(model, incremental=incremental, batch_size=batch_size)
        checked_train = check_numpy_param('train_dataset', train_dataset)
        # Per-neuron activation bounds estimated on the training data.
        self.upper_bounds, self.lower_bounds = self._get_bounds(train_dataset=checked_train)

    def get_metrics(self, dataset):
        """Return the SNAC value of ``dataset``."""
        dataset = check_numpy_param('dataset', dataset)
        if not (self.incremental and self._activate_table):
            self._activate_table = self._init_neuron_activate_table(dataset[0:1])
        num_batches = math.ceil(dataset.shape[0] / self.batch_size)
        for batch_idx in range(num_batches):
            batch = dataset[batch_idx * self.batch_size: (batch_idx + 1) * self.batch_size]
            self._model.predict(Tensor(batch))
            for layer, tensor in _get_summary_tensor_data().items():
                out = tensor.asnumpy()
                if out.ndim > 2:
                    out = np.mean(out, axis=tuple(range(2, out.ndim)))
                exceeds = np.sum(out > self.upper_bounds[layer], axis=0) > 0
                self._activate_table[layer] = np.logical_or(self._activate_table[layer], exceeds)
        return self._activate_rate()
class NeuronBoundsCoverage(SuperNeuronActivateCoverage):
    """Neuron boundary coverage (NBC): a neuron is covered once some test
    sample drives its mean activation outside the [lower, upper] range seen
    on the training data."""

    def __init__(self, model, train_dataset, incremental=False, batch_size=32):
        super(NeuronBoundsCoverage, self).__init__(model, train_dataset, incremental=incremental, batch_size=batch_size)

    def get_metrics(self, dataset):
        """Return the NBC value of ``dataset``."""
        dataset = check_numpy_param('dataset', dataset)
        if not (self.incremental and self._activate_table):
            self._activate_table = self._init_neuron_activate_table(dataset[0:1])
        num_batches = math.ceil(dataset.shape[0] / self.batch_size)
        for batch_idx in range(num_batches):
            batch = dataset[batch_idx * self.batch_size: (batch_idx + 1) * self.batch_size]
            self._model.predict(Tensor(batch))
            for layer, tensor in _get_summary_tensor_data().items():
                out = tensor.asnumpy()
                if out.ndim > 2:
                    out = np.mean(out, axis=tuple(range(2, out.ndim)))
                out_of_bounds = np.logical_or(out > self.upper_bounds[layer],
                                              out < self.lower_bounds[layer])
                fired = np.sum(out_of_bounds, axis=0) > 0
                self._activate_table[layer] = np.logical_or(self._activate_table[layer], fired)
        return self._activate_rate()
class KMultisectionNeuronCoverage(SuperNeuronActivateCoverage):
    """K-multisection neuron coverage (KMNC).

    Each neuron's training-time activation range is split into
    ``segmented_num`` equal sections; KMNC is the fraction of all sections hit
    by at least one test sample.
    """

    def __init__(self, model, train_dataset, segmented_num=100, incremental=False, batch_size=32):
        super(KMultisectionNeuronCoverage, self).__init__(model, train_dataset, incremental=incremental,
                                                          batch_size=batch_size)
        self.segmented_num = check_int_positive('segmented_num', segmented_num)
        # Width of one section per neuron, derived from the training bounds.
        self.intervals = defaultdict(list)
        for keys in self.upper_bounds.keys():
            self.intervals[keys] = (self.upper_bounds[keys] - self.lower_bounds[keys]) / self.segmented_num

    def _init_k_multisection_table(self, data):
        """Build an all-False (neuron x section) hit table per summary layer."""
        self._model.predict(Tensor(data))
        layer_out = _get_summary_tensor_data()
        activate_section_table = defaultdict()
        for layer, value in layer_out.items():
            # Builtin `bool` dtype: the `np.bool` alias is deprecated since
            # NumPy 1.20 and removed in NumPy 1.24.
            activate_section_table[layer] = np.zeros((value.shape[1], self.segmented_num), bool)
        return activate_section_table

    def get_metrics(self, dataset):
        """Return the KMNC value of ``dataset``."""
        dataset = check_numpy_param('dataset', dataset)
        if not self.incremental or not self._activate_table:
            self._activate_table = self._init_k_multisection_table(dataset[0:1])
        batches = math.ceil(dataset.shape[0] / self.batch_size)
        for i in range(batches):
            inputs = dataset[i * self.batch_size: (i + 1) * self.batch_size]
            self._model.predict(Tensor(inputs))
            layer_out = _get_summary_tensor_data()
            for layer, tensor in layer_out.items():
                value = tensor.asnumpy()
                value = np.mean(value, axis=tuple([i for i in range(2, len(value.shape))]))
                # Section index hit by each (sample, neuron) activation.
                hits = np.floor((value - self.lower_bounds[layer]) / self.intervals[layer]).astype(int)
                hits = np.transpose(hits, [1, 0])
                for n in range(len(hits)):
                    for sec in hits[n]:
                        # Activations outside the training range fall in no section.
                        if sec >= self.segmented_num or sec < 0:
                            continue
                        self._activate_table[layer][n][sec] = True
        # Each neuron contributes `segmented_num` sections, so normalize the rate.
        kmnc = self._activate_rate() / self.segmented_num
        return kmnc
| true | true |
f71ee84796075a987a34679b0f4402f426c24334 | 2,327 | py | Python | main.py | Shikhar1998/Basic-Facebook-Notifications-Informer- | 575e3446656697c071739e3ccc65764c5ac1e5ec | [
"MIT"
] | 1 | 2017-01-13T11:10:16.000Z | 2017-01-13T11:10:16.000Z | main.py | Shikhar1998/Basic-Facebook-Notifications-Informer- | 575e3446656697c071739e3ccc65764c5ac1e5ec | [
"MIT"
] | null | null | null | main.py | Shikhar1998/Basic-Facebook-Notifications-Informer- | 575e3446656697c071739e3ccc65764c5ac1e5ec | [
"MIT"
] | null | null | null | import urllib
import urllib2
import time
import json
#install using facebook-sdk"
import facebook
def main():
    """Poll the Facebook Graph API feed and report posts newer than the last run.

    Returns a 3-tuple ``(n, post, author)``: ``n`` is the number of new posts
    (or a configuration message string on first run), ``post`` is the newest
    post's text and ``author`` the poster's name.
    """
    access = "https://graph.facebook.com/"
    # NOTE(review): XXXXXXXXXXXXX is an undefined placeholder — this raises
    # NameError until a real token string is substituted here.
    ACCESS_TOKEN = XXXXXXXXXXXXX #enter your access token here
    datafile = urllib2.urlopen(access + 'me?fields=feed&access_token='+ACCESS_TOKEN)
    # sigma stays 0 when log.txt already exists; "" signals a first run.
    sigma = 0
    try:
        fo = open("log.txt","r+")
    except IOError:
        sigma = ""
        fo = open("log.txt","w+")
    n = 0
    #number of new notifications
    #accessing this functionality for first time
    if sigma == "":
        n = "Configuration Successful : Automatic display every 30 minutes ON"
        index = 0
        # First run: record the newest feed entry's HH:MM:SS as the baseline.
        for ch in json.loads(datafile.read())["feed"]["data"]:
            if index == 0:
                fo = open("log.txt","w")
                fo.write(ch["created_time"][11:19])
                index = 1
    else:
        # Previous run's HH:MM:SS, split on ":" into [hours, minutes, seconds].
        x = fo.readline()
        so = []
        temp = ""
        for j in x:
            if j!= ":":
                temp += j
            else:
                so.append(temp)
                temp = ""
        so.append(temp)
        temp = ""
        index = 0
        ans = ""
        two = ""
        for ch in json.loads(datafile.read())["feed"]["data"]:
            if index == 0:
                # Newest post: remember its message and resolve the author via
                # the user-id prefix of the "<userid>_<postid>" identifier.
                fo = open("log.txt","w")
                ans = ch["message"]
                two = ch["id"]
                k = two.index("_")
                two = two[:k]
                graph = facebook.GraphAPI(ACCESS_TOKEN)
                profile = graph.get_object(two)
                index = 1
            l = []
            t = 0
            sota = []
            # Split this post's HH:MM:SS the same way as the stored baseline.
            for j in ch["created_time"][11:19]:
                if j!= ":" :
                    temp += j
                else:
                    sota.append(temp)
                    temp = ""
            sota.append(temp)
            temp = ""
            # NOTE(review): component-wise string comparison is not a correct
            # chronological test (e.g. 10:05 vs 09:59); presumably intended as
            # "post is not newer than the baseline" — verify.
            if sota[0]<=so[0] and sota[1]<=so[1] and sota[2]<=so[2]:
                break
            else:
                n = n + 1
    # First run returns the configuration message instead of a count.
    if type(n) == str:
        return n,0,0
    else:
        # returning data : number of new posts, post and the name of the person who posted the data
        # NOTE(review): `ans` and `profile` are unbound if the feed is empty — verify.
        return n,ans,profile["name"]
| 30.618421 | 100 | 0.421573 | import urllib
import urllib2
import time
import json
import facebook
def main():
    """Poll the Facebook Graph API feed and report posts newer than the last run.

    Returns a 3-tuple ``(n, post, author)``; ``n`` is a configuration message
    string on first run, otherwise the count of new posts.
    """
    access = "https://graph.facebook.com/"
    # NOTE(review): XXXXXXXXXXXXX is an undefined placeholder — NameError until
    # a real token string is substituted.
    ACCESS_TOKEN = XXXXXXXXXXXXX #enter your access token here
    datafile = urllib2.urlopen(access + 'me?fields=feed&access_token='+ACCESS_TOKEN)
    # sigma stays 0 when log.txt already exists; "" signals a first run.
    sigma = 0
    try:
        fo = open("log.txt","r+")
    except IOError:
        sigma = ""
        fo = open("log.txt","w+")
    n = 0
    #number of new notifications
    #accessing this functionality for first time
    if sigma == "":
        n = "Configuration Successful : Automatic display every 30 minutes ON"
        index = 0
        # First run: store the newest feed entry's HH:MM:SS as the baseline.
        for ch in json.loads(datafile.read())["feed"]["data"]:
            if index == 0:
                fo = open("log.txt","w")
                fo.write(ch["created_time"][11:19])
                index = 1
    else:
        # Previous run's HH:MM:SS split on ":" into [hours, minutes, seconds].
        x = fo.readline()
        so = []
        temp = ""
        for j in x:
            if j!= ":":
                temp += j
            else:
                so.append(temp)
                temp = ""
        so.append(temp)
        temp = ""
        index = 0
        ans = ""
        two = ""
        for ch in json.loads(datafile.read())["feed"]["data"]:
            if index == 0:
                # Newest post: remember message and author (user-id prefix of
                # the "<userid>_<postid>" identifier).
                fo = open("log.txt","w")
                ans = ch["message"]
                two = ch["id"]
                k = two.index("_")
                two = two[:k]
                graph = facebook.GraphAPI(ACCESS_TOKEN)
                profile = graph.get_object(two)
                index = 1
            l = []
            t = 0
            sota = []
            for j in ch["created_time"][11:19]:
                if j!= ":" :
                    temp += j
                else:
                    sota.append(temp)
                    temp = ""
            sota.append(temp)
            temp = ""
            # NOTE(review): component-wise string comparison is not a correct
            # chronological order test — verify intent.
            if sota[0]<=so[0] and sota[1]<=so[1] and sota[2]<=so[2]:
                break
            else:
                n = n + 1
    if type(n) == str:
        return n,0,0
    else:
        # returning data : number of new posts, post and the name of the person who posted the data
        # NOTE(review): `ans`/`profile` are unbound if the feed is empty — verify.
        return n,ans,profile["name"]
| true | true |
f71ee856c8feca44b1ac13c75d39f7729b50ec80 | 753 | py | Python | setup.py | quiquee/python-bip32 | c8d04eb7d25320d93ae5d05f9c6415b70d56b97f | [
"BSD-3-Clause"
] | 28 | 2020-02-13T18:04:57.000Z | 2022-03-14T05:27:18.000Z | setup.py | quiquee/python-bip32 | c8d04eb7d25320d93ae5d05f9c6415b70d56b97f | [
"BSD-3-Clause"
] | 15 | 2020-02-14T14:36:22.000Z | 2022-03-10T11:43:32.000Z | setup.py | quiquee/python-bip32 | c8d04eb7d25320d93ae5d05f9c6415b70d56b97f | [
"BSD-3-Clause"
] | 11 | 2020-05-21T16:35:48.000Z | 2022-03-28T20:37:30.000Z | from setuptools import setup
import bip32
import io
# Long description shown on PyPI comes straight from the README.
with io.open("README.md", encoding="utf-8") as readme_file:
    long_description = readme_file.read()

# One requirement per line; blank lines are skipped.
with io.open("requirements.txt", encoding="utf-8") as req_file:
    requirements = [line for line in req_file.read().split('\n') if line]

setup(
    name="bip32",
    version=bip32.__version__,
    description="Minimalistic implementation of the BIP32 key derivation scheme",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="http://github.com/darosior/python-bip32",
    author="Antoine Poinsot",
    author_email="darosior@protonmail.com",
    license="MIT",
    packages=["bip32"],
    keywords=["bitcoin", "bip32", "hdwallet"],
    install_requires=requirements,
)
| 31.375 | 83 | 0.686587 | from setuptools import setup
import bip32
import io
# Read the README as the PyPI long description.
with io.open("README.md", encoding="utf-8") as f:
    long_description = f.read()
# One requirement per line; blank lines are dropped.
with io.open("requirements.txt", encoding="utf-8") as f:
    requirements = [r for r in f.read().split('\n') if len(r)]
setup(name="bip32",
      version=bip32.__version__,
      description="Minimalistic implementation of the BIP32 key derivation scheme",
      long_description=long_description,
      long_description_content_type="text/markdown",
      url="http://github.com/darosior/python-bip32",
      author="Antoine Poinsot",
      author_email="darosior@protonmail.com",
      license="MIT",
      packages=["bip32"],
      keywords=["bitcoin", "bip32", "hdwallet"],
      install_requires=requirements)
f71ee911da76fff248edaa53f5ef983542823256 | 1,003 | py | Python | degvabank/degvabank/core/admin.py | Vixx-X/DEGVABanck-backend | de413d55b55dba25e89b7f3bc60dfa94e89ddcde | [
"MIT"
] | null | null | null | degvabank/degvabank/core/admin.py | Vixx-X/DEGVABanck-backend | de413d55b55dba25e89b7f3bc60dfa94e89ddcde | [
"MIT"
] | null | null | null | degvabank/degvabank/core/admin.py | Vixx-X/DEGVABanck-backend | de413d55b55dba25e89b7f3bc60dfa94e89ddcde | [
"MIT"
] | 1 | 2022-02-03T03:18:43.000Z | 2022-02-03T03:18:43.000Z | from django.contrib.admin import AdminSite
from django.contrib.admin.apps import AdminConfig
from django.urls import path
class CustomAdminSite(AdminSite):
def get_urls(self):
from degvabank.core import views
urls = super().get_urls()
my_urls = [
path(
'reports/client_transaction/',
self.admin_view(views.ReportClientTransaction.as_view()),
name='report-client-transaction',
),
path(
'reports/client_list/',
self.admin_view(views.ReportClientList.as_view()),
name='report-client-list',
),
path(
'reports/transactions/',
self.admin_view(views.ReportTransactions.as_view()),
name='report-transactions',
),
]
urls = my_urls + urls
return urls
class AdminConfig(AdminConfig):
    # Deliberately shadows django.contrib.admin.apps.AdminConfig so the project
    # can reference the app config under the familiar name while swapping the
    # default admin site for the custom one above.
    default_site = "degvabank.core.admin.CustomAdminSite"
| 30.393939 | 73 | 0.57328 | from django.contrib.admin import AdminSite
from django.contrib.admin.apps import AdminConfig
from django.urls import path
class CustomAdminSite(AdminSite):
def get_urls(self):
from degvabank.core import views
urls = super().get_urls()
my_urls = [
path(
'reports/client_transaction/',
self.admin_view(views.ReportClientTransaction.as_view()),
name='report-client-transaction',
),
path(
'reports/client_list/',
self.admin_view(views.ReportClientList.as_view()),
name='report-client-list',
),
path(
'reports/transactions/',
self.admin_view(views.ReportTransactions.as_view()),
name='report-transactions',
),
]
urls = my_urls + urls
return urls
class AdminConfig(AdminConfig):
    # Shadows django.contrib.admin.apps.AdminConfig on purpose: same public
    # name, but the default admin site is swapped for the custom one above.
    default_site = "degvabank.core.admin.CustomAdminSite"
| true | true |
f71eea01e30f1d3cf1b00ffad3c1feb94073a3fe | 10,231 | py | Python | dev/dev.py | glryanon/Trusty-cogs | 1d5056ad166a7e7ee5039baa31748b1995ae81f6 | [
"MIT"
] | 1 | 2020-12-28T15:58:16.000Z | 2020-12-28T15:58:16.000Z | dev/dev.py | glryanon/Trusty-cogs | 1d5056ad166a7e7ee5039baa31748b1995ae81f6 | [
"MIT"
] | null | null | null | dev/dev.py | glryanon/Trusty-cogs | 1d5056ad166a7e7ee5039baa31748b1995ae81f6 | [
"MIT"
] | null | null | null | import asyncio
import inspect
import io
import textwrap
import traceback
import re
from contextlib import redirect_stdout
from copy import copy
import discord
from redbot.core import checks, commands
from redbot.core.i18n import Translator
from redbot.core.utils.chat_formatting import box, pagify
"""
Notice:
95% of the below code came from R.Danny which can be found here:
https://github.com/Rapptz/RoboDanny/blob/master/cogs/repl.py
"""
_ = Translator("Dev", __file__)
# Matches the opening of a codeblock: "```py" followed by whitespace, or bare "```".
START_CODE_BLOCK_RE = re.compile(r"^((```py)(?=\s)|(```))")
class Dev(commands.Cog):
    """Various development focused utilities."""

    def __init__(self, bot):
        super().__init__()
        self.bot = bot
        self._last_result = None
        # Channel IDs with an active REPL session.
        self.sessions = set()

    @staticmethod
    def cleanup_code(content):
        """Automatically removes code blocks from the code."""
        # remove ```py\n```
        if content.startswith("```") and content.endswith("```"):
            return START_CODE_BLOCK_RE.sub("", content)[:-3]

        # remove `foo`
        return content.strip("` \n")

    @staticmethod
    def get_syntax_error(e):
        """Format a syntax error to send to the user.

        Returns a string representation of the error formatted as a codeblock.
        """
        if e.text is None:
            return box("{0.__class__.__name__}: {0}".format(e), lang="py")
        return box("{0.text}{1:>{0.offset}}\n{2}: {0}".format(e, "^", type(e).__name__), lang="py")

    @staticmethod
    def get_pages(msg: str):
        """Pagify the given message for output to the user."""
        return pagify(msg, delims=["\n", " "], priority=True, shorten_by=10)

    @staticmethod
    def sanitize_output(ctx: commands.Context, input_: str) -> str:
        """Hides the bot's token from a string."""
        token = ctx.bot.http.token
        r = "[EXPUNGED]"
        result = input_.replace(token, r)
        result = result.replace(token.lower(), r)
        result = result.replace(token.upper(), r)
        return result

    @commands.command()
    @checks.is_owner()
    async def debug(self, ctx, *, code):
        """Evaluate a statement of python code.

        The bot will always respond with the return value of the code.
        If the return value of the code is a coroutine, it will be awaited,
        and the result of that will be the bot's response.

        Note: Only one statement may be evaluated. Using await, yield or
        similar restricted keywords will result in a syntax error. For multiple
        lines or asynchronous code, see [p]repl or [p]eval.

        Environment Variables:
            ctx      - command invokation context
            bot      - bot object
            channel  - the current channel object
            author   - command author's member object
            message  - the command's message object
            discord  - discord.py library
            commands - redbot.core.commands
            _        - The result of the last dev command.
        """
        env = {
            "bot": ctx.bot,
            "ctx": ctx,
            "channel": ctx.channel,
            "author": ctx.author,
            "guild": ctx.guild,
            "message": ctx.message,
            "discord": discord,
            "commands": commands,
            "_": self._last_result,
        }

        code = self.cleanup_code(code)

        try:
            result = eval(code, env)
        except SyntaxError as e:
            await ctx.send(self.get_syntax_error(e))
            return
        except Exception as e:
            await ctx.send(box("{}: {!s}".format(type(e).__name__, e), lang="py"))
            return

        if asyncio.iscoroutine(result):
            result = await result

        self._last_result = result
        result = self.sanitize_output(ctx, str(result))
        await ctx.send_interactive(self.get_pages(result), box_lang="py")

    @commands.command(name="eval")
    @checks.is_owner()
    async def _eval(self, ctx, *, body: str):
        """Execute asynchronous code.

        This command wraps code into the body of an async function and then
        calls and awaits it. The bot will respond with anything printed to
        stdout, as well as the return value of the function.

        The code can be within a codeblock, inline code or neither, as long
        as they are not mixed and they are formatted correctly.

        Environment Variables:
            ctx      - command invokation context
            bot      - bot object
            channel  - the current channel object
            author   - command author's member object
            message  - the command's message object
            discord  - discord.py library
            commands - redbot.core.commands
            _        - The result of the last dev command.
        """
        env = {
            "bot": ctx.bot,
            "ctx": ctx,
            "channel": ctx.channel,
            "author": ctx.author,
            "guild": ctx.guild,
            "message": ctx.message,
            "discord": discord,
            "commands": commands,
            "_": self._last_result,
        }

        body = self.cleanup_code(body)
        stdout = io.StringIO()

        to_compile = "async def func():\n%s" % textwrap.indent(body, "  ")

        try:
            exec(to_compile, env)
        except SyntaxError as e:
            return await ctx.send(self.get_syntax_error(e))

        func = env["func"]
        result = None
        try:
            with redirect_stdout(stdout):
                result = await func()
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; the traceback of user code is reported instead.
            printed = "{}{}".format(stdout.getvalue(), traceback.format_exc())
        else:
            printed = stdout.getvalue()
            await ctx.tick()

        if result is not None:
            self._last_result = result
            msg = "{}{}".format(printed, result)
        else:
            msg = printed

        msg = self.sanitize_output(ctx, msg)
        await ctx.send_interactive(self.get_pages(msg), box_lang="py")

    @commands.command()
    @checks.is_owner()
    async def repl(self, ctx):
        """Open an interactive REPL.

        The REPL will only recognise code as messages which start with a
        backtick. This includes codeblocks, and as such multiple lines can be
        evaluated.

        You may not await any code in this REPL unless you define it inside an
        async function.
        """
        variables = {
            "ctx": ctx,
            "bot": ctx.bot,
            "message": ctx.message,
            "guild": ctx.guild,
            "channel": ctx.channel,
            "author": ctx.author,
            "_": None,
        }

        if ctx.channel.id in self.sessions:
            await ctx.send(
                _("Already running a REPL session in this channel. Exit it with `quit`.")
            )
            return

        self.sessions.add(ctx.channel.id)
        await ctx.send(_("Enter code to execute or evaluate. `exit()` or `quit` to exit."))

        msg_check = lambda m: (
            m.author == ctx.author and m.channel == ctx.channel and m.content.startswith("`")
        )

        while True:
            response = await ctx.bot.wait_for("message", check=msg_check)

            cleaned = self.cleanup_code(response.content)

            if cleaned in ("quit", "exit", "exit()"):
                await ctx.send("Exiting.")
                self.sessions.remove(ctx.channel.id)
                return

            executor = exec
            if cleaned.count("\n") == 0:
                # single statement, potentially 'eval'
                try:
                    code = compile(cleaned, "<repl session>", "eval")
                except SyntaxError:
                    pass
                else:
                    executor = eval

            if executor is exec:
                try:
                    code = compile(cleaned, "<repl session>", "exec")
                except SyntaxError as e:
                    await ctx.send(self.get_syntax_error(e))
                    continue

            variables["message"] = response

            stdout = io.StringIO()

            msg = ""

            try:
                with redirect_stdout(stdout):
                    result = executor(code, variables)
                    if inspect.isawaitable(result):
                        result = await result
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # still propagate instead of being swallowed into the REPL output.
                value = stdout.getvalue()
                msg = "{}{}".format(value, traceback.format_exc())
            else:
                value = stdout.getvalue()
                if result is not None:
                    msg = "{}{}".format(value, result)
                    variables["_"] = result
                elif value:
                    msg = "{}".format(value)

            msg = self.sanitize_output(ctx, msg)

            try:
                await ctx.send_interactive(self.get_pages(msg), box_lang="py")
            except discord.Forbidden:
                pass
            except discord.HTTPException as e:
                await ctx.send(_("Unexpected error: `{}`").format(e))

    @commands.command()
    @checks.is_owner()
    async def sudo(self, ctx, user: discord.Member, *, command):
        """Mock another user invoking a command.

        The prefix must not be entered.
        """
        msg = copy(ctx.message)
        msg.author = user
        msg.content = ctx.prefix + command
        ctx.bot.dispatch("message", msg)

    @commands.command(name="sudomsg")
    @checks.is_owner()
    async def mock_msg(self, ctx, user: discord.Member, *, content: str):
        """Dispatch a message event as if it were sent by a different user.

        Only reads the raw content of the message. Attachments, embeds etc. are
        ignored.
        """
        old_author = ctx.author
        old_content = ctx.message.content
        ctx.message.author = user
        ctx.message.content = content

        ctx.bot.dispatch("message", ctx.message)

        # If we change the author and content back too quickly,
        # the bot won't process the mocked message in time.
        await asyncio.sleep(2)
        ctx.message.author = old_author
        ctx.message.content = old_content
| 32.274448 | 99 | 0.55498 | import asyncio
import inspect
import io
import textwrap
import traceback
import re
from contextlib import redirect_stdout
from copy import copy
import discord
from redbot.core import checks, commands
from redbot.core.i18n import Translator
from redbot.core.utils.chat_formatting import box, pagify
_ = Translator("Dev", __file__)
# Matches the opening of a codeblock: "```py" followed by whitespace, or bare "```".
START_CODE_BLOCK_RE = re.compile(r"^((```py)(?=\s)|(```))")
class Dev(commands.Cog):
    """Various development focused utilities (debug/eval/REPL/sudo commands)."""
    def __init__(self, bot):
        super().__init__()
        self.bot = bot
        self._last_result = None
        # Channel IDs with an active REPL session.
        self.sessions = set()
    @staticmethod
    def cleanup_code(content):
        """Strip a surrounding codeblock or inline backticks from `content`."""
        if content.startswith("```") and content.endswith("```"):
            return START_CODE_BLOCK_RE.sub("", content)[:-3]
        return content.strip("` \n")
    @staticmethod
    def get_syntax_error(e):
        """Format a SyntaxError as a ```py codeblock string for the user."""
        if e.text is None:
            return box("{0.__class__.__name__}: {0}".format(e), lang="py")
        return box("{0.text}{1:>{0.offset}}\n{2}: {0}".format(e, "^", type(e).__name__), lang="py")
    @staticmethod
    def get_pages(msg: str):
        """Pagify `msg` so long output fits into Discord message limits."""
        return pagify(msg, delims=["\n", " "], priority=True, shorten_by=10)
    @staticmethod
    def sanitize_output(ctx: commands.Context, input_: str) -> str:
        """Replace any occurrence of the bot token in `input_` with a placeholder."""
        token = ctx.bot.http.token
        r = "[EXPUNGED]"
        result = input_.replace(token, r)
        result = result.replace(token.lower(), r)
        result = result.replace(token.upper(), r)
        return result
    @commands.command()
    @checks.is_owner()
    async def debug(self, ctx, *, code):
        """Evaluate a single python statement and send its (awaited) result."""
        env = {
            "bot": ctx.bot,
            "ctx": ctx,
            "channel": ctx.channel,
            "author": ctx.author,
            "guild": ctx.guild,
            "message": ctx.message,
            "discord": discord,
            "commands": commands,
            "_": self._last_result,
        }
        code = self.cleanup_code(code)
        try:
            result = eval(code, env)
        except SyntaxError as e:
            await ctx.send(self.get_syntax_error(e))
            return
        except Exception as e:
            await ctx.send(box("{}: {!s}".format(type(e).__name__, e), lang="py"))
            return
        if asyncio.iscoroutine(result):
            result = await result
        self._last_result = result
        result = self.sanitize_output(ctx, str(result))
        await ctx.send_interactive(self.get_pages(result), box_lang="py")
    @commands.command(name="eval")
    @checks.is_owner()
    async def _eval(self, ctx, *, body: str):
        """Execute multi-line async code: wraps `body` in an async function,
        awaits it and reports stdout plus the return value."""
        env = {
            "bot": ctx.bot,
            "ctx": ctx,
            "channel": ctx.channel,
            "author": ctx.author,
            "guild": ctx.guild,
            "message": ctx.message,
            "discord": discord,
            "commands": commands,
            "_": self._last_result,
        }
        body = self.cleanup_code(body)
        stdout = io.StringIO()
        to_compile = "async def func():\n%s" % textwrap.indent(body, "  ")
        try:
            exec(to_compile, env)
        except SyntaxError as e:
            return await ctx.send(self.get_syntax_error(e))
        func = env["func"]
        result = None
        try:
            with redirect_stdout(stdout):
                result = await func()
        # NOTE(review): bare except also traps KeyboardInterrupt/SystemExit.
        except:
            printed = "{}{}".format(stdout.getvalue(), traceback.format_exc())
        else:
            printed = stdout.getvalue()
            await ctx.tick()
        if result is not None:
            self._last_result = result
            msg = "{}{}".format(printed, result)
        else:
            msg = printed
        msg = self.sanitize_output(ctx, msg)
        await ctx.send_interactive(self.get_pages(msg), box_lang="py")
    @commands.command()
    @checks.is_owner()
    async def repl(self, ctx):
        """Open an interactive REPL in this channel; messages starting with a
        backtick are executed until `quit`/`exit`/`exit()`."""
        variables = {
            "ctx": ctx,
            "bot": ctx.bot,
            "message": ctx.message,
            "guild": ctx.guild,
            "channel": ctx.channel,
            "author": ctx.author,
            "_": None,
        }
        if ctx.channel.id in self.sessions:
            await ctx.send(
                _("Already running a REPL session in this channel. Exit it with `quit`.")
            )
            return
        self.sessions.add(ctx.channel.id)
        await ctx.send(_("Enter code to execute or evaluate. `exit()` or `quit` to exit."))
        msg_check = lambda m: (
            m.author == ctx.author and m.channel == ctx.channel and m.content.startswith("`")
        )
        while True:
            response = await ctx.bot.wait_for("message", check=msg_check)
            cleaned = self.cleanup_code(response.content)
            if cleaned in ("quit", "exit", "exit()"):
                await ctx.send("Exiting.")
                self.sessions.remove(ctx.channel.id)
                return
            executor = exec
            if cleaned.count("\n") == 0:
                # Single line: try to compile as an expression so we can `eval`
                # it and echo the value; fall back to `exec` on SyntaxError.
                try:
                    code = compile(cleaned, "<repl session>", "eval")
                except SyntaxError:
                    pass
                else:
                    executor = eval
            if executor is exec:
                try:
                    code = compile(cleaned, "<repl session>", "exec")
                except SyntaxError as e:
                    await ctx.send(self.get_syntax_error(e))
                    continue
            variables["message"] = response
            stdout = io.StringIO()
            msg = ""
            try:
                with redirect_stdout(stdout):
                    result = executor(code, variables)
                    if inspect.isawaitable(result):
                        result = await result
            # NOTE(review): bare except also traps KeyboardInterrupt/SystemExit.
            except:
                value = stdout.getvalue()
                msg = "{}{}".format(value, traceback.format_exc())
            else:
                value = stdout.getvalue()
                if result is not None:
                    msg = "{}{}".format(value, result)
                    variables["_"] = result
                elif value:
                    msg = "{}".format(value)
            msg = self.sanitize_output(ctx, msg)
            try:
                await ctx.send_interactive(self.get_pages(msg), box_lang="py")
            except discord.Forbidden:
                pass
            except discord.HTTPException as e:
                await ctx.send(_("Unexpected error: `{}`").format(e))
    @commands.command()
    @checks.is_owner()
    async def sudo(self, ctx, user: discord.Member, *, command):
        """Re-dispatch the invoking message as if `user` ran `command` (no prefix)."""
        msg = copy(ctx.message)
        msg.author = user
        msg.content = ctx.prefix + command
        ctx.bot.dispatch("message", msg)
    @commands.command(name="sudomsg")
    @checks.is_owner()
    async def mock_msg(self, ctx, user: discord.Member, *, content: str):
        """Dispatch a message event as if `user` sent `content`; only the raw
        text is mocked (attachments/embeds are ignored)."""
        old_author = ctx.author
        old_content = ctx.message.content
        ctx.message.author = user
        ctx.message.content = content
        ctx.bot.dispatch("message", ctx.message)
        # Restore author/content only after a delay so the bot has time to
        # process the mocked message.
        await asyncio.sleep(2)
        ctx.message.author = old_author
        ctx.message.content = old_content
| true | true |
f71eea68f5a737010440afc05bea9804d4dc0846 | 4,204 | py | Python | python/jittor/test/test_argsort_op.py | Exusial/jittor | eca21d5bba5098bce4f492fa44908677b6e76588 | [
"Apache-2.0"
] | 2,571 | 2020-03-20T03:38:35.000Z | 2022-03-31T08:20:05.000Z | python/jittor/test/test_argsort_op.py | Exusial/jittor | eca21d5bba5098bce4f492fa44908677b6e76588 | [
"Apache-2.0"
] | 197 | 2020-03-20T04:11:47.000Z | 2022-03-31T10:14:24.000Z | python/jittor/test/test_argsort_op.py | Exusial/jittor | eca21d5bba5098bce4f492fa44908677b6e76588 | [
"Apache-2.0"
] | 284 | 2020-03-20T03:53:15.000Z | 2022-03-28T07:20:32.000Z | # ***************************************************************
# Copyright (c) 2021 Jittor. All Rights Reserved.
# Maintainers:
# Guoye Yang <498731903@qq.com>
# Dun Liang <randonlang@gmail.com>.
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import unittest
import jittor as jt
import numpy as np
from jittor import compile_extern
from .test_log import find_log_with_re
# The cub/cublas/cudnn extern ops only exist in CUDA builds; stub them out to
# None otherwise so the skipIf decorators below can detect their absence.
if jt.has_cuda:
    from jittor.compile_extern import cublas_ops, cudnn_ops, cub_ops
else:
    cublas_ops = cudnn_ops = cub_ops = None
def check_argsort(shape, dim, descending = False):
    """Check jt.argsort against numpy: the returned keys and the values
    gathered through the returned indices must both equal np.sort along `dim`."""
    x = jt.random(shape)
    indices, keys = jt.argsort(x, dim=dim, descending=descending)
    # Gather x through the argsort indices along `dim`, identity elsewhere.
    coords = [
        indices if axis == dim else jt.index(shape, dim=axis)
        for axis in range(len(shape))
    ]
    gathered = jt.reindex(x, coords).data
    keys_np = keys.data
    # Negating before and after np.sort yields a descending sort.
    sign = -1 if descending else 1
    expected = sign * np.sort(sign * x.data, axis=dim)
    assert np.allclose(keys_np, expected)
    assert np.allclose(gathered, expected)
def check_cub_argsort(shape, dim, descending = False):
    """Same check as `check_argsort`, but also asserts via the captured op log
    that the CUDA cub_argsort kernel was actually compiled/used."""
    with jt.log_capture_scope(
        log_silent=1,
        log_v=0, log_vprefix="op.cc=100"
    ) as raw_log:
        x = jt.random(shape)
        y, y_key = jt.argsort(x, dim=dim, descending=descending)
        v = []
        # Gather x through the argsort indices along `dim`, identity elsewhere.
        for i in range(len(shape)):
            if (i == dim):
                v.append(y)
            else:
                v.append(jt.index(shape, dim=i))
        yk = jt.reindex(x, v)
        yk_ = yk.data
        y_key_ = y_key.data
    # Exactly one cub_argsort op must appear in the captured JIT log.
    logs = find_log_with_re(raw_log, "(Jit op key (not )?found: " + "cub_argsort" + ".*)")
    assert len(logs)==1
    # Negate before/after np.sort to emulate a descending sort.
    x__ = x.data
    if descending:
        x__ = -x__
    yk__ = np.sort(x__, axis=dim)
    if descending:
        yk__ = -yk__
    assert np.allclose(y_key_, yk__)
    assert np.allclose(yk_, yk__)
def check_backward(shape, dim, descending = False):
    """Check the gradient of argsort keys: d/dx sum(keys**2) must be 2*x,
    since sorting only permutes elements."""
    x = jt.random(shape)
    _, keys = jt.argsort(x, dim=dim, descending=descending)
    grad_x = jt.grad((keys * keys).sum(), x)
    assert np.allclose(x.data * 2, grad_x.data)
class TestArgsortOp(unittest.TestCase):
    """Exercise jt.argsort (CPU and, when available, the CUDA cub backend)."""

    # (shape, dim, descending) combinations shared by all test methods.
    _CASES = (
        ([5, 5], 0, False),
        ([5, 5], 0, True),
        ([5, 5], 1, False),
        ([5, 5], 1, True),
        ([12, 34, 56, 78], 1, True),
        ([12, 34, 56, 78], 3, True),
        ([12, 34, 56, 78], 2, False),
        ([12, 34, 56, 78], 0, False),
    )

    def test(self):
        for shape, dim, descending in self._CASES:
            check_argsort(shape, dim, descending)

    def test_backward(self):
        for shape, dim, descending in self._CASES:
            check_backward(shape, dim, descending)

    def test_doc(self):
        assert "Argsort Operator" in jt.argsort.__doc__

    @unittest.skipIf(cub_ops is None, "Not use cub, Skip")
    @jt.flag_scope(use_cuda=1)
    def test_cub(self):
        for shape, dim, descending in self._CASES:
            check_cub_argsort(shape, dim, descending)

    @unittest.skipIf(cub_ops is None, "Not use cub, Skip")
    @jt.flag_scope(use_cuda=1)
    def test_cub_backward(self):
        for shape, dim, descending in self._CASES:
            check_backward(shape, dim, descending)
# Allow running this test file directly with `python test_argsort_op.py`.
if __name__ == "__main__":
    unittest.main()
import unittest
import jittor as jt
import numpy as np
from jittor import compile_extern
from .test_log import find_log_with_re
# CUDA-only extern ops; stubbed to None on CPU builds so skipIf can detect them.
if jt.has_cuda:
    from jittor.compile_extern import cublas_ops, cudnn_ops, cub_ops
else:
    cublas_ops = cudnn_ops = cub_ops = None
def check_argsort(shape, dim, descending = False):
    """
    Verify jt.argsort on a random tensor of the given shape along `dim`:
    both the returned sorted keys and the values gathered via the returned
    indices must match numpy's sort of the same data.
    """
    data = jt.random(shape)
    indices, keys = jt.argsort(data, dim=dim, descending=descending)
    # Build one index var per axis: the argsort result along `dim`,
    # identity indices everywhere else, then gather via reindex.
    index_vars = [indices if axis == dim else jt.index(shape, dim=axis)
                  for axis in range(len(shape))]
    gathered = jt.reindex(data, index_vars).data
    # Emulate descending order by negating before and after np.sort.
    sign = -1.0 if descending else 1.0
    expected = sign * np.sort(sign * data.data, axis=dim)
    assert np.allclose(keys.data, expected)
    assert np.allclose(gathered, expected)
def check_cub_argsort(shape, dim, descending = False):
    """
    Same check as check_argsort, but additionally captures jittor's op logs
    while the ops execute to verify that the CUDA cub_argsort kernel was the
    implementation actually used.
    """
    with jt.log_capture_scope(
        log_silent=1,
        log_v=0, log_vprefix="op.cc=100"
    ) as raw_log:
        # All jittor ops (including the .data fetches that trigger execution)
        # must happen inside this scope so their jit-op logs are captured.
        x = jt.random(shape)
        y, y_key = jt.argsort(x, dim=dim, descending=descending)
        v = []
        for i in range(len(shape)):
            if (i == dim):
                v.append(y)
            else:
                v.append(jt.index(shape, dim=i))
        yk = jt.reindex(x, v)
        yk_ = yk.data
        y_key_ = y_key.data
    # Exactly one cub_argsort jit-op record proves the cub path ran.
    logs = find_log_with_re(raw_log, "(Jit op key (not )?found: " + "cub_argsort" + ".*)")
    assert len(logs)==1
    # Compare against numpy: negate before/after np.sort to emulate descending.
    x__ = x.data
    if descending:
        x__ = -x__
    yk__ = np.sort(x__, axis=dim)
    if descending:
        yk__ = -yk__
    assert np.allclose(y_key_, yk__)
    assert np.allclose(yk_, yk__)
def check_backward(shape, dim, descending = False):
    """
    Verify gradients flow through jt.argsort's sorted keys: for
    loss = sum(keys**2), d(loss)/dx is 2*x elementwise, since sorting
    only permutes the entries.
    """
    data = jt.random(shape)
    _, keys = jt.argsort(data, dim=dim, descending=descending)
    objective = (keys * keys).sum()
    grad = jt.grad(objective, data)
    assert np.allclose(data.data * 2, grad.data)
class TestArgsortOp(unittest.TestCase):
    """
    End-to-end checks for jt.argsort: forward results against numpy, gradient
    correctness, documentation presence, and (when built with CUDA) the
    cub-accelerated code path.
    """

    # (shape, dim, descending) cases shared by all parameterized checks below.
    _CASES = [
        ([5,5], 0, False),
        ([5,5], 0, True),
        ([5,5], 1, False),
        ([5,5], 1, True),
        ([12, 34, 56, 78], 1, True),
        ([12, 34, 56, 78], 3, True),
        ([12, 34, 56, 78], 2, False),
        ([12, 34, 56, 78], 0, False),
    ]

    def test(self):
        for shape, dim, descending in self._CASES:
            check_argsort(shape, dim, descending)

    def test_backward(self):
        for shape, dim, descending in self._CASES:
            check_backward(shape, dim, descending)

    def test_doc(self):
        # Guard against the operator docstring being dropped.
        assert "Argsort Operator" in jt.argsort.__doc__

    @unittest.skipIf(cub_ops==None, "Not use cub, Skip")
    @jt.flag_scope(use_cuda=1)
    def test_cub(self):
        for shape, dim, descending in self._CASES:
            check_cub_argsort(shape, dim, descending)

    @unittest.skipIf(cub_ops==None, "Not use cub, Skip")
    @jt.flag_scope(use_cuda=1)
    def test_cub_backward(self):
        for shape, dim, descending in self._CASES:
            check_backward(shape, dim, descending)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
f71eea94ed32d1472943ed5187bc14418db296ff | 40,905 | py | Python | gpMgmt/bin/gppylib/operations/buildMirrorSegments.py | abhisheknishant138/gpdb | 1805743d505837026aa137cabb8a7072d745a129 | [
"PostgreSQL",
"Apache-2.0"
] | 4 | 2017-11-28T08:12:58.000Z | 2020-10-28T04:15:52.000Z | gpMgmt/bin/gppylib/operations/buildMirrorSegments.py | abhisheknishant138/gpdb | 1805743d505837026aa137cabb8a7072d745a129 | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | gpMgmt/bin/gppylib/operations/buildMirrorSegments.py | abhisheknishant138/gpdb | 1805743d505837026aa137cabb8a7072d745a129 | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | import datetime
import os
import pipes
import signal
import time
from gppylib.mainUtils import *
from gppylib.utils import checkNotNone
from gppylib.db import dbconn
from gppylib import gparray, gplog
from gppylib.commands import unix
from gppylib.commands import gp
from gppylib.commands import base
from gppylib.gparray import GpArray
from gppylib.operations import startSegments
from gppylib.gp_era import read_era
from gppylib.operations.utils import ParallelOperation, RemoteOperation
from gppylib.system import configurationInterface as configInterface
from gppylib.commands.gp import is_pid_postmaster, get_pid_from_remotehost
from gppylib.commands.unix import check_pid_on_remotehost, Scp
# Module-level logger shared by all operations in this file.
logger = gplog.get_default_logger()
# Subdirectories of a segment data directory; this list mirrors the
# directories created by initdb (see initdb.c). Used when cleaning out a
# segment data directory during recovery.
gDatabaseDirectories = [
    "global",
    "log",
    "pg_wal",
    "pg_xact",
    "pg_changetracking",
    "pg_subtrans",
    "pg_twophase",
    "pg_multixact",
    "pg_distributedxidmap",
    "pg_distributedlog",
    "base",
    "pg_tblspc",
    "pg_stat_tmp"
]
#
# Database files that may exist in the root of a segment data directory
# and need deleting during cleanup.
#
gDatabaseFiles = [
    "PG_VERSION",
    "pg_hba.conf",
    "pg_ident.conf",
    "postgresql.conf",
    "postmaster.log",
    "postmaster.opts",
    "postmaster.pid",
]
#
# NOTE: it is a little quirky that the caller must set things up so that the failover
#   segment is in gpArray but the failed one is not (when both are set). Consider changing
#   that, or at least protecting against inconsistent setups.
#
class GpMirrorToBuild:
    """
    Describes one mirror segment to recover: the failed segment (if any), the
    live primary to recover from, and an optional failover target location.
    All consistency between the three is validated in __init__.
    """
    def __init__(self, failedSegment, liveSegment, failoverSegment, forceFullSynchronization, logger=logger):
        """
        @param failedSegment the segment that went down, or None (e.g. when adding mirrors)
        @param liveSegment the up, primary segment to recover from; must not be None
        @param failoverSegment new target location for the mirror, or None to recover in place
        @param forceFullSynchronization if True, always use full recovery
        @param logger unused here; kept for call compatibility -- TODO confirm
        @raise ExceptionNoStackTraceNeeded if the segments are inconsistent with each other
        """
        checkNotNone("liveSegment", liveSegment)
        checkNotNone("forceFullSynchronization", forceFullSynchronization)
        if failedSegment is None and failoverSegment is None:
            raise Exception("No mirror passed to GpMirrorToBuild")
        if not liveSegment.isSegmentQE():
            raise ExceptionNoStackTraceNeeded("Segment to recover from for content %s is not a correct segment "
                                              "(it is a coordinator or standby coordinator)" % liveSegment.getSegmentContentId())
        if not liveSegment.isSegmentPrimary(True):
            raise ExceptionNoStackTraceNeeded(
                "Segment to recover from for content %s is not a primary" % liveSegment.getSegmentContentId())
        if not liveSegment.isSegmentUp():
            raise ExceptionNoStackTraceNeeded(
                "Primary segment is not up for content %s" % liveSegment.getSegmentContentId())
        if failedSegment is not None:
            if failedSegment.getSegmentContentId() != liveSegment.getSegmentContentId():
                raise ExceptionNoStackTraceNeeded(
                    "The primary is not of the same content as the failed mirror. Primary content %d, "
                    "mirror content %d" % (liveSegment.getSegmentContentId(), failedSegment.getSegmentContentId()))
            if failedSegment.getSegmentDbId() == liveSegment.getSegmentDbId():
                raise ExceptionNoStackTraceNeeded("For content %d, the dbid values are the same. "
                                                  "A segment may not be recovered from itself" %
                                                  liveSegment.getSegmentDbId())
        if failoverSegment is not None:
            if failoverSegment.getSegmentContentId() != liveSegment.getSegmentContentId():
                raise ExceptionNoStackTraceNeeded(
                    "The primary is not of the same content as the mirror. Primary content %d, "
                    "mirror content %d" % (liveSegment.getSegmentContentId(), failoverSegment.getSegmentContentId()))
            if failoverSegment.getSegmentDbId() == liveSegment.getSegmentDbId():
                raise ExceptionNoStackTraceNeeded("For content %d, the dbid values are the same. "
                                                  "A segment may not be built from itself"
                                                  % liveSegment.getSegmentDbId())
        if failedSegment is not None and failoverSegment is not None:
            # for now, we require the code to have produced this -- even when moving the segment to another
            # location, we preserve the directory
            assert failedSegment.getSegmentDbId() == failoverSegment.getSegmentDbId()
        self.__failedSegment = failedSegment
        self.__liveSegment = liveSegment
        self.__failoverSegment = failoverSegment
        """
        __forceFullSynchronization is true if full resynchronization should be FORCED -- that is, the
        existing segment will be cleared and all objects will be transferred by the file resynchronization
        process on the server
        """
        self.__forceFullSynchronization = forceFullSynchronization
    def getFailedSegment(self):
        """
        returns the segment that failed. This can be None, for example when adding mirrors
        """
        return self.__failedSegment
    def getLiveSegment(self):
        """
        returns the primary segment from which the recovery will take place. Will always be non-None
        """
        return self.__liveSegment
    def getFailoverSegment(self):
        """
        returns the target segment to which we will copy the data, or None
        if we will recover in place. Note that __failoverSegment should refer to the same dbid
        as __failedSegment, but should have updated path + file information.
        """
        return self.__failoverSegment
    def isFullSynchronization(self):
        """
        Returns whether or not this segment to recover needs to recover using full resynchronization
        """
        if self.__forceFullSynchronization:
            return True
        # if we are failing over to a new segment location then we must fully resync
        if self.__failoverSegment is not None:
            return True
        return False
class GpMirrorListToBuild:
    """
    Orchestrates recovery of a list of GpMirrorToBuild entries: stopping
    failed segments, cleaning/copying data directories, running pg_rewind for
    incremental recovery, updating the catalog, and starting the mirrors.
    """
    class Progress:
        # How recovery progress is reported to the user:
        NONE = 0        # no progress output
        INPLACE = 1     # rewrite the same terminal lines (ANSI cursor moves)
        SEQUENTIAL = 2  # append a new line per update
    def __init__(self, toBuild, pool, quiet, parallelDegree, additionalWarnings=None, logger=logger, forceoverwrite=False, progressMode=Progress.INPLACE):
        """
        @param toBuild list of GpMirrorToBuild objects describing the segments to recover
        @param pool worker pool used to run commands in parallel
        @param quiet if True, suppress progress output while waiting on the pool
        @param parallelDegree batch size passed to per-host configuration commands
        @param additionalWarnings optional list of warning strings accumulated while building toBuild
        @param logger must be non-None
        @param forceoverwrite if True, skip cleaning target directories before full recovery
        @param progressMode one of the Progress constants controlling progress display
        """
        self.__mirrorsToBuild = toBuild
        self.__pool = pool
        self.__quiet = quiet
        self.__progressMode = progressMode
        self.__parallelDegree = parallelDegree
        self.__forceoverwrite = forceoverwrite
        self.__additionalWarnings = additionalWarnings or []
        if not logger:
            raise Exception('logger argument cannot be None')
        self.__logger = logger
    class ProgressCommand(gp.Command):
        """
        A Command, but with an associated DBID and log file path for use by
        _join_and_show_segment_progress(). This class is tightly coupled to that
        implementation.
        """
        def __init__(self, name, cmdStr, dbid, filePath, ctxt, remoteHost):
            # dbid and filePath identify which segment's progress file
            # this command tails.
            super(GpMirrorListToBuild.ProgressCommand, self).__init__(name, cmdStr, ctxt, remoteHost)
            self.dbid = dbid
            self.filePath = filePath
def getMirrorsToBuild(self):
"""
Returns a newly allocated list
"""
return [m for m in self.__mirrorsToBuild]
    def getAdditionalWarnings(self):
        """
        Returns any additional warnings generated during building of list.
        Note: the internal list itself is returned, not a copy.
        """
        return self.__additionalWarnings
    class RewindSegmentInfo:
        """
        Which segments to run pg_rewind during incremental recovery. The
        targetSegment is of type gparray.Segment. All progressFiles should have
        the same timeStamp.
        """
        def __init__(self, targetSegment, sourceHostname, sourcePort, timeStamp):
            self.targetSegment = targetSegment
            self.sourceHostname = sourceHostname
            self.sourcePort = sourcePort
            # One progress log file per target dbid; the timestamp is shared
            # across all segments of a single gprecoverseg run.
            self.progressFile = '%s/pg_rewind.%s.dbid%s.out' % (gplog.get_logger_dir(),
                                                                timeStamp,
                                                                targetSegment.getSegmentDbId())
    def buildMirrors(self, actionName, gpEnv, gpArray):
        """
        Build the mirrors.
        gpArray must have already been altered to have updated directories -- that is, the failoverSegments
        from the mirrorsToBuild must be present in gpArray.
        @param actionName verb used in log messages (e.g. "recover")
        @return True when all mirrors were configured and started successfully;
                False when pg_rewind or segment startup failed for any of them.
        """
        if len(self.__mirrorsToBuild) == 0:
            self.__logger.info("No segments to " + actionName)
            return True
        self.checkForPortAndDirectoryConflicts(gpArray)
        self.__logger.info("%s segment(s) to %s" % (len(self.__mirrorsToBuild), actionName))
        # make sure the target directories are up-to-date
        # by cleaning them, if needed, and then copying a basic directory there
        # the postgresql.conf in that basic directory will need updating (to change the port)
        toStopDirectives = []
        toEnsureMarkedDown = []
        cleanupDirectives = []
        copyDirectives = []
        for toRecover in self.__mirrorsToBuild:
            if toRecover.getFailedSegment() is not None:
                # will stop the failed segment. Note that we do this even if we are recovering to a different location!
                toStopDirectives.append(GpStopSegmentDirectoryDirective(toRecover.getFailedSegment()))
                if toRecover.getFailedSegment().getSegmentStatus() == gparray.STATUS_UP:
                    toEnsureMarkedDown.append(toRecover.getFailedSegment())
            if toRecover.isFullSynchronization():
                isTargetReusedLocation = False
                if toRecover.getFailedSegment() is not None and \
                        toRecover.getFailoverSegment() is None:
                    #
                    # We are recovering a failed segment in-place
                    #
                    cleanupDirectives.append(GpCleanupSegmentDirectoryDirective(toRecover.getFailedSegment()))
                    isTargetReusedLocation = True
                if toRecover.getFailoverSegment() is not None:
                    targetSegment = toRecover.getFailoverSegment()
                else:
                    targetSegment = toRecover.getFailedSegment()
                d = GpCopySegmentDirectoryDirective(toRecover.getLiveSegment(), targetSegment, isTargetReusedLocation)
                copyDirectives.append(d)
        self.__ensureStopped(gpEnv, toStopDirectives)
        self.__ensureMarkedDown(gpEnv, toEnsureMarkedDown)
        if not self.__forceoverwrite:
            self.__cleanUpSegmentDirectories(cleanupDirectives)
        self.__copySegmentDirectories(gpEnv, gpArray, copyDirectives)
        # update and save metadata in memory
        for toRecover in self.__mirrorsToBuild:
            if toRecover.getFailoverSegment() is None:
                # we are recovering the lost segment in place
                seg = toRecover.getFailedSegment()
            else:
                seg = toRecover.getFailedSegment()
                # no need to update the failed segment's information -- it is
                # being overwritten in the configuration with the failover segment
                for gpArraySegment in gpArray.getDbList():
                    if gpArraySegment is seg:
                        raise Exception(
                            "failed segment should not be in the new configuration if failing over to new segment")
                seg = toRecover.getFailoverSegment()
            seg.setSegmentStatus(gparray.STATUS_DOWN)  # down initially, we haven't started it yet
            seg.setSegmentMode(gparray.MODE_NOT_SYNC)
        # figure out what needs to be started or transitioned
        mirrorsToStart = []
        # Map of mirror dbid to GpMirrorListToBuild.RewindSegmentInfo objects
        rewindInfo = {}
        primariesToConvert = []
        convertPrimaryUsingFullResync = []
        fullResyncMirrorDbIds = {}
        timeStamp = datetime.datetime.today().strftime('%Y%m%d_%H%M%S')
        for toRecover in self.__mirrorsToBuild:
            seg = toRecover.getFailoverSegment()
            if seg is None:
                seg = toRecover.getFailedSegment()  # we are recovering in place
            mirrorsToStart.append(seg)
            primarySeg = toRecover.getLiveSegment()
            # Add to rewindInfo to execute pg_rewind later if we are not
            # using full recovery. We will run pg_rewind on incremental recovery
            # if the target mirror does not have standby.signal file because
            # segment failover happened. The check for standby.signal file will
            # happen in the same remote SegmentRewind Command call.
            if not toRecover.isFullSynchronization() \
                    and seg.getSegmentRole() == gparray.ROLE_MIRROR:
                rewindInfo[seg.getSegmentDbId()] = GpMirrorListToBuild.RewindSegmentInfo(
                    seg, primarySeg.getSegmentHostName(), primarySeg.getSegmentPort(),
                    timeStamp)
            # The change in configuration to of the mirror to down requires that
            # the primary also be marked as unsynchronized.
            primarySeg.setSegmentMode(gparray.MODE_NOT_SYNC)
            primariesToConvert.append(primarySeg)
            convertPrimaryUsingFullResync.append(toRecover.isFullSynchronization())
            if toRecover.isFullSynchronization() and seg.getSegmentDbId() > 0:
                fullResyncMirrorDbIds[seg.getSegmentDbId()] = True
        # should use mainUtils.getProgramName but I can't make it work!
        programName = os.path.split(sys.argv[0])[-1]
        # Disable Ctrl-C, going to save metadata in database and transition segments
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        rewindFailedSegments = []
        try:
            self.__logger.info("Updating configuration with new mirrors")
            configInterface.getConfigurationProvider().updateSystemConfig(
                gpArray,
                "%s: segment config for resync" % programName,
                dbIdToForceMirrorRemoveAdd=fullResyncMirrorDbIds,
                useUtilityMode=False,
                allowPrimary=False
            )
            self.__logger.info("Updating mirrors")
            if len(rewindInfo) != 0:
                self.__logger.info("Running pg_rewind on required mirrors")
                rewindFailedSegments = self.run_pg_rewind(rewindInfo)
                # Do not start mirrors that failed pg_rewind
                for failedSegment in rewindFailedSegments:
                    mirrorsToStart.remove(failedSegment)
            self.__logger.info("Starting mirrors")
            start_all_successful = self.__startAll(gpEnv, gpArray, mirrorsToStart)
        finally:
            # Re-enable Ctrl-C
            signal.signal(signal.SIGINT, signal.default_int_handler)
        if len(rewindFailedSegments) != 0:
            return False
        return start_all_successful
    def run_pg_rewind(self, rewindInfo):
        """
        Run pg_rewind for incremental recovery.
        For each target: first issues CHECKPOINT on the source primary and
        removes any stale postmaster.pid on the target, then runs pg_rewind
        for all targets in parallel through the worker pool.
        @param rewindInfo map of target dbid -> RewindSegmentInfo
        @return list of target segments for which pg_rewind failed
        """
        rewindFailedSegments = []
        # Run pg_rewind on all the targets
        cmds = []
        progressCmds = []
        removeCmds= []
        for rewindSeg in list(rewindInfo.values()):
            # Do CHECKPOINT on source to force TimeLineID to be updated in pg_control.
            # pg_rewind wants that to make incremental recovery successful finally.
            self.__logger.debug('Do CHECKPOINT on %s (port: %d) before running pg_rewind.' % (rewindSeg.sourceHostname, rewindSeg.sourcePort))
            dburl = dbconn.DbURL(hostname=rewindSeg.sourceHostname,
                                 port=rewindSeg.sourcePort,
                                 dbname='template1')
            conn = dbconn.connect(dburl, utility=True)
            dbconn.execSQL(conn, "CHECKPOINT")
            conn.close()
            # If the postmaster.pid still exists and another process
            # is actively using that pid, pg_rewind will fail when it
            # tries to start the failed segment in single-user
            # mode. It should be safe to remove the postmaster.pid
            # file since we do not expect the failed segment to be up.
            self.remove_postmaster_pid_from_remotehost(
                rewindSeg.targetSegment.getSegmentHostName(),
                rewindSeg.targetSegment.getSegmentDataDirectory())
            # Note the command name, we use the dbid later to
            # correlate the command results with GpMirrorToBuild
            # object.
            cmd = gp.SegmentRewind('rewind dbid: %s' %
                                   rewindSeg.targetSegment.getSegmentDbId(),
                                   rewindSeg.targetSegment.getSegmentHostName(),
                                   rewindSeg.targetSegment.getSegmentDataDirectory(),
                                   rewindSeg.sourceHostname,
                                   rewindSeg.sourcePort,
                                   rewindSeg.progressFile,
                                   verbose=True)
            progressCmd, removeCmd = self.__getProgressAndRemoveCmds(rewindSeg.progressFile,
                                                                     rewindSeg.targetSegment.getSegmentDbId(),
                                                                     rewindSeg.targetSegment.getSegmentHostName())
            cmds.append(cmd)
            removeCmds.append(removeCmd)
            if progressCmd:
                progressCmds.append(progressCmd)
        completedCmds = self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "rewinding segments",
                                                                          suppressErrorCheck=True,
                                                                          progressCmds=progressCmds)
        self.__runWaitAndCheckWorkerPoolForErrorsAndClear(removeCmds, "removing rewind progress logfiles",
                                                          suppressErrorCheck=False)
        # NOTE(review): re-initialization -- the identical assignment at the top
        # of this function is dead code.
        rewindFailedSegments = []
        for cmd in completedCmds:
            self.__logger.debug('pg_rewind results: %s' % cmd.results)
            if not cmd.was_successful():
                # dbid is recovered from the command name built above
                # ('rewind dbid: <n>').
                dbid = int(cmd.name.split(':')[1].strip())
                self.__logger.debug("%s failed" % cmd.name)
                self.__logger.warning(cmd.get_stdout())
                self.__logger.warning("Incremental recovery failed for dbid %d. You must use gprecoverseg -F to recover the segment." % dbid)
                rewindFailedSegments.append(rewindInfo[dbid].targetSegment)
        return rewindFailedSegments
def remove_postmaster_pid_from_remotehost(self, host, datadir):
cmd = base.Command(name = 'remove the postmaster.pid file',
cmdStr = 'rm -f %s/postmaster.pid' % datadir,
ctxt=gp.REMOTE, remoteHost = host)
cmd.run()
return_code = cmd.get_return_code()
if return_code != 0:
raise ExecutionError("Failed while trying to remove postmaster.pid.", cmd)
def checkForPortAndDirectoryConflicts(self, gpArray):
"""
Check gpArray for internal consistency -- no duplicate ports or directories on the same host, for example
A detected problem causes an Exception to be raised
"""
for hostName, segmentArr in GpArray.getSegmentsByHostName(gpArray.getDbList()).items():
usedPorts = {}
usedDataDirectories = {}
for segment in segmentArr:
# check for port conflict
port = segment.getSegmentPort()
dbid = segment.getSegmentDbId()
if port in usedPorts:
raise Exception(
"Segment dbid's %s and %s on host %s cannot have the same port %s." %
(dbid, usedPorts.get(port), hostName, port))
usedPorts[port] = dbid
# check for directory conflict; could improve this by reporting nicer the conflicts
path = segment.getSegmentDataDirectory()
if path in usedDataDirectories:
raise Exception(
"Segment dbid's %s and %s on host %s cannot have the same data directory '%s'." %
(dbid, usedDataDirectories.get(path), hostName, path))
usedDataDirectories[path] = dbid
    def _join_and_show_segment_progress(self, cmds, inplace=False, outfile=sys.stdout, interval=1):
        """
        Wait on the worker pool, periodically running each ProgressCommand in
        *cmds* and printing one status line per segment to *outfile*.
        When *inplace* is True, the cursor is moved back up over the previously
        written lines (ANSI escapes) so the display updates in place.
        @param interval seconds between pool polls / progress refreshes
        """
        written = False
        def print_progress():
            # Move the cursor up over our previous output block (inplace mode
            # only, and only after we have written at least once).
            if written and inplace:
                outfile.write("\x1B[%dA" % len(cmds))
            output = []
            for cmd in cmds:
                try:
                    # since print_progress is called multiple times,
                    # cache cmdStr to reset it after being mutated by cmd.run()
                    cmd_str = cmd.cmdStr
                    cmd.run(validateAfter=True)
                    cmd.cmdStr = cmd_str
                    results = cmd.get_results().stdout.rstrip()
                except ExecutionError:
                    # Fall back to the first stderr line (if any) as the status.
                    lines = cmd.get_results().stderr.splitlines()
                    if lines:
                        results = lines[0]
                    else:
                        results = ''
                output.append("%s (dbid %d): %s" % (cmd.remoteHost, cmd.dbid, results))
                if inplace:
                    # erase to end-of-line so shorter updates don't leave residue
                    output.append("\x1B[K")
                output.append("\n")
            outfile.write("".join(output))
            outfile.flush()
        while not self.__pool.join(interval):
            print_progress()
            written = True
        # Make sure every line is updated with the final status.
        print_progress()
    # There is a race between when the recovery process creates the progressFile
    # and when this progressCmd is run. Thus, the progress command touches
    # the file to ensure its presence before tailing.
    def __getProgressAndRemoveCmds(self, progressFile, targetSegmentDbId, targetHostname):
        """
        Build the pair of remote commands used around a recovery progress file:
        a ProgressCommand that tails the file's last line (None when progress
        reporting is disabled), and a Command that removes the file afterwards.
        @return (progressCmd_or_None, removeCmd)
        """
        progressCmd = None
        if self.__progressMode != GpMirrorListToBuild.Progress.NONE:
            # touch -a first: the recovery process may not have created the
            # file yet when this command first runs.
            progressCmd = GpMirrorListToBuild.ProgressCommand("tail the last line of the file",
                                                              "set -o pipefail; touch -a {0}; tail -1 {0} | tr '\\r' '\\n' | tail -1".format(
                                                                  pipes.quote(progressFile)),
                                                              targetSegmentDbId,
                                                              progressFile,
                                                              ctxt=base.REMOTE,
                                                              remoteHost=targetHostname)
        removeCmd = base.Command("remove file",
                                 "rm -f %s" % pipes.quote(progressFile),
                                 ctxt=base.REMOTE,
                                 remoteHost=targetHostname)
        return progressCmd, removeCmd
def __runWaitAndCheckWorkerPoolForErrorsAndClear(self, cmds, actionVerb, suppressErrorCheck=False,
progressCmds=[]):
for cmd in cmds:
self.__pool.addCommand(cmd)
if self.__quiet:
self.__pool.join()
elif progressCmds:
self._join_and_show_segment_progress(progressCmds,
inplace=self.__progressMode == GpMirrorListToBuild.Progress.INPLACE)
else:
base.join_and_indicate_progress(self.__pool)
if not suppressErrorCheck:
self.__pool.check_results()
completedRecoveryCmds = list(set(self.__pool.getCompletedItems()) & set(cmds))
self.__pool.empty_completed_items()
return completedRecoveryCmds
def __copyFiles(self, srcDir, destDir, fileNames):
for name in fileNames:
cmd = gp.LocalCopy("copy file for segment", srcDir + "/" + name, destDir + "/" + name)
cmd.run(validateAfter=True)
def __createEmptyDirectories(self, dir, newDirectoryNames):
for name in newDirectoryNames:
subDir = os.path.join(dir, name)
unix.MakeDirectory("create blank directory for segment", subDir).run(validateAfter=True)
unix.Chmod.local('set permissions on blank dir', subDir, '0700')
    def __copySegmentDirectories(self, gpEnv, gpArray, directives):
        """
        directives should be composed of GpCopySegmentDirectoryDirective values
        Validates the target directories, then rebuilds each target segment
        from its source via gpconfigurenewsegment, and finally copies any
        db_dumps files from the source segment to the new one.
        """
        if len(directives) == 0:
            return
        srcSegments = []
        destSegments = []
        isTargetReusedLocation = []
        timeStamp = datetime.datetime.today().strftime('%Y%m%d_%H%M%S')
        for directive in directives:
            srcSegment = directive.getSrcSegment()
            destSegment = directive.getDestSegment()
            # Stash source-connection info and a per-dbid progress log path
            # on the destination segment for use by ConfigureNewSegment.
            destSegment.primaryHostname = srcSegment.getSegmentHostName()
            destSegment.primarySegmentPort = srcSegment.getSegmentPort()
            destSegment.progressFile = '%s/pg_basebackup.%s.dbid%s.out' % (gplog.get_logger_dir(),
                                                                           timeStamp,
                                                                           destSegment.getSegmentDbId())
            srcSegments.append(srcSegment)
            destSegments.append(destSegment)
            isTargetReusedLocation.append(directive.isTargetReusedLocation())
        destSegmentByHost = GpArray.getSegmentsByHostName(destSegments)
        newSegmentInfo = gp.ConfigureNewSegment.buildSegmentInfoForNewSegment(destSegments, isTargetReusedLocation)
        def createConfigureNewSegmentCommand(hostName, cmdLabel, validationOnly):
            # Build one per-host gpconfigurenewsegment command; validationOnly
            # dry-runs the directory checks without configuring anything.
            segmentInfo = newSegmentInfo[hostName]
            checkNotNone("segmentInfo for %s" % hostName, segmentInfo)
            return gp.ConfigureNewSegment(cmdLabel,
                                          segmentInfo,
                                          gplog.get_logger_dir(),
                                          newSegments=True,
                                          verbose=gplog.logging_is_verbose(),
                                          batchSize=self.__parallelDegree,
                                          ctxt=gp.REMOTE,
                                          remoteHost=hostName,
                                          validationOnly=validationOnly,
                                          forceoverwrite=self.__forceoverwrite)
        #
        # validate directories for target segments
        #
        self.__logger.info('Validating remote directories')
        cmds = []
        for hostName in list(destSegmentByHost.keys()):
            cmds.append(createConfigureNewSegmentCommand(hostName, 'validate blank segments', True))
        for cmd in cmds:
            self.__pool.addCommand(cmd)
        if self.__quiet:
            self.__pool.join()
        else:
            base.join_and_indicate_progress(self.__pool)
        validationErrors = []
        for item in self.__pool.getCompletedItems():
            results = item.get_results()
            if not results.wasSuccessful():
                if results.rc == 1:
                    # rc 1 carries per-line failure detail on stderr
                    lines = results.stderr.split("\n")
                    for line in lines:
                        if len(line.strip()) > 0:
                            validationErrors.append("Validation failure on host %s %s" % (item.remoteHost, line))
                else:
                    validationErrors.append(str(item))
        self.__pool.empty_completed_items()
        if validationErrors:
            raise ExceptionNoStackTraceNeeded("\n" + ("\n".join(validationErrors)))
        # Configure a new segment
        #
        # Recover segments using gpconfigurenewsegment, which
        # uses pg_basebackup. gprecoverseg generates a log filename which is
        # passed to gpconfigurenewsegment as a confinfo parameter. gprecoverseg
        # tails this file to show recovery progress to the user, and removes the
        # file when done. A new file is generated for each run of
        # gprecoverseg based on a timestamp.
        self.__logger.info('Configuring new segments')
        cmds = []
        progressCmds = []
        removeCmds= []
        for hostName in list(destSegmentByHost.keys()):
            for segment in destSegmentByHost[hostName]:
                progressCmd, removeCmd = self.__getProgressAndRemoveCmds(segment.progressFile,
                                                                         segment.getSegmentDbId(),
                                                                         hostName)
                removeCmds.append(removeCmd)
                if progressCmd:
                    progressCmds.append(progressCmd)
            cmds.append(
                createConfigureNewSegmentCommand(hostName, 'configure blank segments', False))
        self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "unpacking basic segment directory",
                                                          suppressErrorCheck=False,
                                                          progressCmds=progressCmds)
        self.__runWaitAndCheckWorkerPoolForErrorsAndClear(removeCmds, "removing pg_basebackup progress logfiles",
                                                          suppressErrorCheck=False)
        #
        # copy dump files from old segment to new segment
        #
        for srcSeg in srcSegments:
            for destSeg in destSegments:
                if srcSeg.content == destSeg.content:
                    src_dump_dir = os.path.join(srcSeg.getSegmentDataDirectory(), 'db_dumps')
                    cmd = base.Command('check existence of db_dumps directory', 'ls %s' % (src_dump_dir),
                                       ctxt=base.REMOTE, remoteHost=destSeg.getSegmentAddress())
                    cmd.run()
                    if cmd.results.rc == 0:  # Only try to copy directory if it exists
                        cmd = Scp('copy db_dumps from old segment to new segment',
                                  os.path.join(srcSeg.getSegmentDataDirectory(), 'db_dumps*', '*'),
                                  os.path.join(destSeg.getSegmentDataDirectory(), 'db_dumps'),
                                  srcSeg.getSegmentAddress(),
                                  destSeg.getSegmentAddress(),
                                  recursive=True)
                        cmd.run(validateAfter=True)
                    break
    def _get_running_postgres_segments(self, segments):
        """
        Filter *segments* down to those that are actually running: the data
        directory (symlinks resolved) must have a postmaster.pid whose pid is
        alive on the remote host and is a postmaster process.
        @return list of segments that should be stopped
        """
        running_segments = []
        for seg in segments:
            datadir = self.dereference_remote_symlink(seg.getSegmentDataDirectory(), seg.getSegmentHostName())
            pid = get_pid_from_remotehost(seg.getSegmentHostName(), datadir)
            if pid is not None:
                if check_pid_on_remotehost(pid, seg.getSegmentHostName()):
                    if is_pid_postmaster(datadir, pid, seg.getSegmentHostName()):
                        running_segments.append(seg)
                    else:
                        self.__logger.info("Skipping to stop segment %s on host %s since it is not a postgres process" % (
                            seg.getSegmentDataDirectory(), seg.getSegmentHostName()))
                else:
                    self.__logger.debug("Skipping to stop segment %s on host %s since process with pid %s is not running" % (
                        seg.getSegmentDataDirectory(), seg.getSegmentHostName(), pid))
            else:
                self.__logger.debug("Skipping to stop segment %s on host %s since pid could not be found" % (
                    seg.getSegmentDataDirectory(), seg.getSegmentHostName()))
        return running_segments
def dereference_remote_symlink(self, datadir, host):
cmdStr = """python -c 'import os; print(os.path.realpath("%s"))'""" % datadir
cmd = base.Command('dereference a symlink on a remote host', cmdStr=cmdStr, ctxt=base.REMOTE, remoteHost=host)
cmd.run()
results = cmd.get_results()
if results.rc != 0:
self.__logger.warning('Unable to determine if %s is symlink. Assuming it is not symlink' % (datadir))
return datadir
return results.stdout.strip()
    def __ensureStopped(self, gpEnv, directives):
        """
        Stop every still-running failed segment named by the directives.
        @param directives a list of the GpStopSegmentDirectoryDirective values indicating which segments to stop
        """
        if len(directives) == 0:
            return
        self.__logger.info("Ensuring %d failed segment(s) are stopped" % (len(directives)))
        segments = [d.getSegment() for d in directives]
        # only try to stop segments whose postmaster is actually running
        segments = self._get_running_postgres_segments(segments)
        segmentByHost = GpArray.getSegmentsByHostName(segments)
        cmds = []
        for hostName, segments in segmentByHost.items():
            cmd = gp.GpSegStopCmd("remote segment stop on host '%s'" % hostName,
                                  gpEnv.getGpHome(), gpEnv.getGpVersion(),
                                  mode='fast', dbs=segments, verbose=gplog.logging_is_verbose(),
                                  ctxt=base.REMOTE, remoteHost=hostName)
            cmds.append(cmd)
        # we suppress checking for the error. This is because gpsegstop will actually error
        # in many cases where the stop is actually done (that is, for example, the segment is
        # running but slow to shutdown so gpsegstop errors after whacking it with a kill)
        #
        # Perhaps we should make it check whether the segment is running and only attempt
        # the stop if it is; in that case, we could propagate the error
        #
        self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "stopping segments", suppressErrorCheck=True)
def __ensureMarkedDown(self, gpEnv, toEnsureMarkedDown):
"""Waits for FTS prober to mark segments as down"""
wait_time = 60 * 30 # Wait up to 30 minutes to handle very large, busy
# clusters that may have faults. In most cases the
# actual time to wait will be small and this operation
# is only needed when moving mirrors that are up and
# needed to be stopped, an uncommon operation.
dburl = dbconn.DbURL(port=gpEnv.getCoordinatorPort(), dbname='template1')
time_elapsed = 0
seg_up_count = 0
initial_seg_up_count = len(toEnsureMarkedDown)
last_seg_up_count = initial_seg_up_count
if initial_seg_up_count == 0:
# Nothing to wait on
return
self.__logger.info("Waiting for segments to be marked down.")
self.__logger.info("This may take up to %d seconds on large clusters." % wait_time)
# wait for all needed segments to be marked down by the prober. We'll wait
# a max time of double the interval
while wait_time > time_elapsed:
seg_up_count = 0
current_gparray = GpArray.initFromCatalog(dburl, True)
seg_db_map = current_gparray.getSegDbMap()
# go through and get the status of each segment we need to be marked down
for segdb in toEnsureMarkedDown:
if segdb.getSegmentDbId() in seg_db_map and seg_db_map[segdb.getSegmentDbId()].isSegmentUp():
seg_up_count += 1
if seg_up_count == 0:
break
else:
if last_seg_up_count != seg_up_count:
print("\n", end=' ')
self.__logger.info("%d of %d segments have been marked down." %
(initial_seg_up_count - seg_up_count, initial_seg_up_count))
last_seg_up_count = seg_up_count
for _i in range(1, 5):
time.sleep(1)
sys.stdout.write(".")
sys.stdout.flush()
time_elapsed += 5
if seg_up_count == 0:
print("\n", end=' ')
self.__logger.info("%d of %d segments have been marked down." %
(initial_seg_up_count, initial_seg_up_count))
else:
raise Exception("%d segments were not marked down by FTS" % seg_up_count)
def __cleanUpSegmentDirectories(self, directives):
if len(directives) == 0:
return
self.__logger.info("Cleaning files from %d segment(s)" % (len(directives)))
segments = [d.getSegment() for d in directives]
segmentByHost = GpArray.getSegmentsByHostName(segments)
cmds = []
for hostName, segments in segmentByHost.items():
cmds.append(gp.GpCleanSegmentDirectories("clean segment directories on %s" % hostName,
segments, gp.REMOTE, hostName))
self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "cleaning existing directories")
    def __createStartSegmentsOp(self, gpEnv):
        """Build a StartSegmentsOperation bound to our pool/quiet settings."""
        return startSegments.StartSegmentsOperation(self.__pool, self.__quiet,
                                                    gpEnv.getGpVersion(),
                                                    gpEnv.getGpHome(), gpEnv.getCoordinatorDataDir()
                                                    )
    def __updateGpIdFile(self, gpEnv, gpArray, segments):
        """
        Rewrite the gp_id file on each segment (one ConfigureNewSegment
        command per host, with writeGpIdFileOnly=True so nothing else is
        reconfigured).
        """
        segmentByHost = GpArray.getSegmentsByHostName(segments)
        newSegmentInfo = gp.ConfigureNewSegment.buildSegmentInfoForNewSegment(segments)
        cmds = []
        for hostName in list(segmentByHost.keys()):
            segmentInfo = newSegmentInfo[hostName]
            checkNotNone("segmentInfo for %s" % hostName, segmentInfo)
            cmd = gp.ConfigureNewSegment("update gpid file",
                                         segmentInfo,
                                         gplog.get_logger_dir(),
                                         newSegments=False,
                                         verbose=gplog.logging_is_verbose(),
                                         batchSize=self.__parallelDegree,
                                         ctxt=gp.REMOTE,
                                         remoteHost=hostName,
                                         validationOnly=False,
                                         writeGpIdFileOnly=True)
            cmds.append(cmd)
        self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "writing updated gpid files")
def __startAll(self, gpEnv, gpArray, segments):
    """Start *segments* as mirrorless segments in the current era.

    Returns True only when every segment started; failures are logged
    (the fault prober will mark them down) rather than raised.
    """
    # the newly started segments should belong to the current era
    era = read_era(gpEnv.getCoordinatorDataDir(), logger=self.__logger)
    segmentStartResult = self.__createStartSegmentsOp(gpEnv).startSegments(gpArray, segments,
                                                                           startSegments.START_AS_MIRRORLESS,
                                                                           era)
    failures = segmentStartResult.getFailedSegmentObjs()
    for failure in failures:
        failedSeg = failure.getSegment()
        failureReason = failure.getReason()
        # warning() is the non-deprecated spelling of Logger.warn()
        self.__logger.warning(
            "Failed to start segment.  The fault prober will shortly mark it as down. Segment: %s: REASON: %s" % (
                failedSeg, failureReason))
    # fixed typo: "successfull" -> expressed directly as the return value
    return len(failures) == 0
class GpCleanupSegmentDirectoryDirective:
    """Directive marking a segment whose data directory must be cleaned."""

    def __init__(self, segment):
        # fail fast: a directive without a segment is a programming error
        checkNotNone("segment", segment)
        self.__segment = segment

    def getSegment(self):
        """Return the segment whose directory is to be cleaned."""
        return self.__segment
class GpStopSegmentDirectoryDirective:
    """Directive marking a segment whose postmaster should be stopped."""

    def __init__(self, segment):
        # fail fast: a directive without a segment is a programming error
        checkNotNone("segment", segment)
        self.__segment = segment

    def getSegment(self):
        """Return the segment to be stopped."""
        return self.__segment
class GpCopySegmentDirectoryDirective:
    """Directive to copy a live (source) segment's directory to a recovery target."""

    def __init__(self, source, dest, isTargetReusedLocation):
        """
        @param source the live segment to copy from
        @param dest the target segment to copy to
        @param isTargetReusedLocation if True then the dest location is a cleaned-up location
        """
        checkNotNone("source", source)
        checkNotNone("dest", dest)
        self.__source = source
        self.__dest = dest
        self.__isTargetReusedLocation = isTargetReusedLocation

    def getSrcSegment(self):
        """Return the live segment being copied from."""
        return self.__source

    def getDestSegment(self):
        """Return the target segment being copied to."""
        return self.__dest

    def isTargetReusedLocation(self):
        """True when the destination is a previously used, cleaned-up location."""
        return self.__isTargetReusedLocation
| 45.652902 | 154 | 0.590661 | import datetime
import os
import pipes
import signal
import time
from gppylib.mainUtils import *
from gppylib.utils import checkNotNone
from gppylib.db import dbconn
from gppylib import gparray, gplog
from gppylib.commands import unix
from gppylib.commands import gp
from gppylib.commands import base
from gppylib.gparray import GpArray
from gppylib.operations import startSegments
from gppylib.gp_era import read_era
from gppylib.operations.utils import ParallelOperation, RemoteOperation
from gppylib.system import configurationInterface as configInterface
from gppylib.commands.gp import is_pid_postmaster, get_pid_from_remotehost
from gppylib.commands.unix import check_pid_on_remotehost, Scp
logger = gplog.get_default_logger()
gDatabaseDirectories = [
"global",
"log",
"pg_wal",
"pg_xact",
"pg_changetracking",
"pg_subtrans",
"pg_twophase",
"pg_multixact",
"pg_distributedxidmap",
"pg_distributedlog",
"base",
"pg_tblspc",
"pg_stat_tmp"
]
gDatabaseFiles = [
"PG_VERSION",
"pg_hba.conf",
"pg_ident.conf",
"postgresql.conf",
"postmaster.log",
"postmaster.opts",
"postmaster.pid",
]
# failed is not (if both set)...change that, or at least protect against problems
#
class GpMirrorToBuild:
def __init__(self, failedSegment, liveSegment, failoverSegment, forceFullSynchronization, logger=logger):
checkNotNone("liveSegment", liveSegment)
checkNotNone("forceFullSynchronization", forceFullSynchronization)
if failedSegment is None and failoverSegment is None:
raise Exception("No mirror passed to GpMirrorToBuild")
if not liveSegment.isSegmentQE():
raise ExceptionNoStackTraceNeeded("Segment to recover from for content %s is not a correct segment "
"(it is a coordinator or standby coordinator)" % liveSegment.getSegmentContentId())
if not liveSegment.isSegmentPrimary(True):
raise ExceptionNoStackTraceNeeded(
"Segment to recover from for content %s is not a primary" % liveSegment.getSegmentContentId())
if not liveSegment.isSegmentUp():
raise ExceptionNoStackTraceNeeded(
"Primary segment is not up for content %s" % liveSegment.getSegmentContentId())
if failedSegment is not None:
if failedSegment.getSegmentContentId() != liveSegment.getSegmentContentId():
raise ExceptionNoStackTraceNeeded(
"The primary is not of the same content as the failed mirror. Primary content %d, "
"mirror content %d" % (liveSegment.getSegmentContentId(), failedSegment.getSegmentContentId()))
if failedSegment.getSegmentDbId() == liveSegment.getSegmentDbId():
raise ExceptionNoStackTraceNeeded("For content %d, the dbid values are the same. "
"A segment may not be recovered from itself" %
liveSegment.getSegmentDbId())
if failoverSegment is not None:
if failoverSegment.getSegmentContentId() != liveSegment.getSegmentContentId():
raise ExceptionNoStackTraceNeeded(
"The primary is not of the same content as the mirror. Primary content %d, "
"mirror content %d" % (liveSegment.getSegmentContentId(), failoverSegment.getSegmentContentId()))
if failoverSegment.getSegmentDbId() == liveSegment.getSegmentDbId():
raise ExceptionNoStackTraceNeeded("For content %d, the dbid values are the same. "
"A segment may not be built from itself"
% liveSegment.getSegmentDbId())
if failedSegment is not None and failoverSegment is not None:
# for now, we require the code to have produced this -- even when moving the segment to another
# location, we preserve the directory
assert failedSegment.getSegmentDbId() == failoverSegment.getSegmentDbId()
self.__failedSegment = failedSegment
self.__liveSegment = liveSegment
self.__failoverSegment = failoverSegment
self.__forceFullSynchronization = forceFullSynchronization
def getFailedSegment(self):
return self.__failedSegment
def getLiveSegment(self):
return self.__liveSegment
def getFailoverSegment(self):
return self.__failoverSegment
def isFullSynchronization(self):
if self.__forceFullSynchronization:
return True
# if we are failing over to a new segment location then we must fully resync
if self.__failoverSegment is not None:
return True
return False
class GpMirrorListToBuild:
class Progress:
NONE = 0
INPLACE = 1
SEQUENTIAL = 2
def __init__(self, toBuild, pool, quiet, parallelDegree, additionalWarnings=None, logger=logger, forceoverwrite=False, progressMode=Progress.INPLACE):
self.__mirrorsToBuild = toBuild
self.__pool = pool
self.__quiet = quiet
self.__progressMode = progressMode
self.__parallelDegree = parallelDegree
self.__forceoverwrite = forceoverwrite
self.__additionalWarnings = additionalWarnings or []
if not logger:
raise Exception('logger argument cannot be None')
self.__logger = logger
class ProgressCommand(gp.Command):
def __init__(self, name, cmdStr, dbid, filePath, ctxt, remoteHost):
super(GpMirrorListToBuild.ProgressCommand, self).__init__(name, cmdStr, ctxt, remoteHost)
self.dbid = dbid
self.filePath = filePath
def getMirrorsToBuild(self):
return [m for m in self.__mirrorsToBuild]
def getAdditionalWarnings(self):
return self.__additionalWarnings
class RewindSegmentInfo:
def __init__(self, targetSegment, sourceHostname, sourcePort, timeStamp):
self.targetSegment = targetSegment
self.sourceHostname = sourceHostname
self.sourcePort = sourcePort
self.progressFile = '%s/pg_rewind.%s.dbid%s.out' % (gplog.get_logger_dir(),
timeStamp,
targetSegment.getSegmentDbId())
def buildMirrors(self, actionName, gpEnv, gpArray):
if len(self.__mirrorsToBuild) == 0:
self.__logger.info("No segments to " + actionName)
return True
self.checkForPortAndDirectoryConflicts(gpArray)
self.__logger.info("%s segment(s) to %s" % (len(self.__mirrorsToBuild), actionName))
# make sure the target directories are up-to-date
# by cleaning them, if needed, and then copying a basic directory there
# the postgresql.conf in that basic directory will need updating (to change the port)
toStopDirectives = []
toEnsureMarkedDown = []
cleanupDirectives = []
copyDirectives = []
for toRecover in self.__mirrorsToBuild:
if toRecover.getFailedSegment() is not None:
# will stop the failed segment. Note that we do this even if we are recovering to a different location!
toStopDirectives.append(GpStopSegmentDirectoryDirective(toRecover.getFailedSegment()))
if toRecover.getFailedSegment().getSegmentStatus() == gparray.STATUS_UP:
toEnsureMarkedDown.append(toRecover.getFailedSegment())
if toRecover.isFullSynchronization():
isTargetReusedLocation = False
if toRecover.getFailedSegment() is not None and \
toRecover.getFailoverSegment() is None:
#
# We are recovering a failed segment in-place
#
cleanupDirectives.append(GpCleanupSegmentDirectoryDirective(toRecover.getFailedSegment()))
isTargetReusedLocation = True
if toRecover.getFailoverSegment() is not None:
targetSegment = toRecover.getFailoverSegment()
else:
targetSegment = toRecover.getFailedSegment()
d = GpCopySegmentDirectoryDirective(toRecover.getLiveSegment(), targetSegment, isTargetReusedLocation)
copyDirectives.append(d)
self.__ensureStopped(gpEnv, toStopDirectives)
self.__ensureMarkedDown(gpEnv, toEnsureMarkedDown)
if not self.__forceoverwrite:
self.__cleanUpSegmentDirectories(cleanupDirectives)
self.__copySegmentDirectories(gpEnv, gpArray, copyDirectives)
# update and save metadata in memory
for toRecover in self.__mirrorsToBuild:
if toRecover.getFailoverSegment() is None:
# we are recovering the lost segment in place
seg = toRecover.getFailedSegment()
else:
seg = toRecover.getFailedSegment()
# no need to update the failed segment's information -- it is
for gpArraySegment in gpArray.getDbList():
if gpArraySegment is seg:
raise Exception(
"failed segment should not be in the new configuration if failing over to new segment")
seg = toRecover.getFailoverSegment()
seg.setSegmentStatus(gparray.STATUS_DOWN)
seg.setSegmentMode(gparray.MODE_NOT_SYNC)
# figure out what needs to be started or transitioned
mirrorsToStart = []
# Map of mirror dbid to GpMirrorListToBuild.RewindSegmentInfo objects
rewindInfo = {}
primariesToConvert = []
convertPrimaryUsingFullResync = []
fullResyncMirrorDbIds = {}
timeStamp = datetime.datetime.today().strftime('%Y%m%d_%H%M%S')
for toRecover in self.__mirrorsToBuild:
seg = toRecover.getFailoverSegment()
if seg is None:
seg = toRecover.getFailedSegment() # we are recovering in place
mirrorsToStart.append(seg)
primarySeg = toRecover.getLiveSegment()
# Add to rewindInfo to execute pg_rewind later if we are not
# using full recovery. We will run pg_rewind on incremental recovery
# if the target mirror does not have standby.signal file because
# segment failover happened. The check for standby.signal file will
# happen in the same remote SegmentRewind Command call.
if not toRecover.isFullSynchronization() \
and seg.getSegmentRole() == gparray.ROLE_MIRROR:
rewindInfo[seg.getSegmentDbId()] = GpMirrorListToBuild.RewindSegmentInfo(
seg, primarySeg.getSegmentHostName(), primarySeg.getSegmentPort(),
timeStamp)
# The change in configuration to of the mirror to down requires that
# the primary also be marked as unsynchronized.
primarySeg.setSegmentMode(gparray.MODE_NOT_SYNC)
primariesToConvert.append(primarySeg)
convertPrimaryUsingFullResync.append(toRecover.isFullSynchronization())
if toRecover.isFullSynchronization() and seg.getSegmentDbId() > 0:
fullResyncMirrorDbIds[seg.getSegmentDbId()] = True
# should use mainUtils.getProgramName but I can't make it work!
programName = os.path.split(sys.argv[0])[-1]
signal.signal(signal.SIGINT, signal.SIG_IGN)
rewindFailedSegments = []
try:
self.__logger.info("Updating configuration with new mirrors")
configInterface.getConfigurationProvider().updateSystemConfig(
gpArray,
"%s: segment config for resync" % programName,
dbIdToForceMirrorRemoveAdd=fullResyncMirrorDbIds,
useUtilityMode=False,
allowPrimary=False
)
self.__logger.info("Updating mirrors")
if len(rewindInfo) != 0:
self.__logger.info("Running pg_rewind on required mirrors")
rewindFailedSegments = self.run_pg_rewind(rewindInfo)
for failedSegment in rewindFailedSegments:
mirrorsToStart.remove(failedSegment)
self.__logger.info("Starting mirrors")
start_all_successful = self.__startAll(gpEnv, gpArray, mirrorsToStart)
finally:
signal.signal(signal.SIGINT, signal.default_int_handler)
if len(rewindFailedSegments) != 0:
return False
return start_all_successful
def run_pg_rewind(self, rewindInfo):
rewindFailedSegments = []
cmds = []
progressCmds = []
removeCmds= []
for rewindSeg in list(rewindInfo.values()):
self.__logger.debug('Do CHECKPOINT on %s (port: %d) before running pg_rewind.' % (rewindSeg.sourceHostname, rewindSeg.sourcePort))
dburl = dbconn.DbURL(hostname=rewindSeg.sourceHostname,
port=rewindSeg.sourcePort,
dbname='template1')
conn = dbconn.connect(dburl, utility=True)
dbconn.execSQL(conn, "CHECKPOINT")
conn.close()
self.remove_postmaster_pid_from_remotehost(
rewindSeg.targetSegment.getSegmentHostName(),
rewindSeg.targetSegment.getSegmentDataDirectory())
cmd = gp.SegmentRewind('rewind dbid: %s' %
rewindSeg.targetSegment.getSegmentDbId(),
rewindSeg.targetSegment.getSegmentHostName(),
rewindSeg.targetSegment.getSegmentDataDirectory(),
rewindSeg.sourceHostname,
rewindSeg.sourcePort,
rewindSeg.progressFile,
verbose=True)
progressCmd, removeCmd = self.__getProgressAndRemoveCmds(rewindSeg.progressFile,
rewindSeg.targetSegment.getSegmentDbId(),
rewindSeg.targetSegment.getSegmentHostName())
cmds.append(cmd)
removeCmds.append(removeCmd)
if progressCmd:
progressCmds.append(progressCmd)
completedCmds = self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "rewinding segments",
suppressErrorCheck=True,
progressCmds=progressCmds)
self.__runWaitAndCheckWorkerPoolForErrorsAndClear(removeCmds, "removing rewind progress logfiles",
suppressErrorCheck=False)
rewindFailedSegments = []
for cmd in completedCmds:
self.__logger.debug('pg_rewind results: %s' % cmd.results)
if not cmd.was_successful():
dbid = int(cmd.name.split(':')[1].strip())
self.__logger.debug("%s failed" % cmd.name)
self.__logger.warning(cmd.get_stdout())
self.__logger.warning("Incremental recovery failed for dbid %d. You must use gprecoverseg -F to recover the segment." % dbid)
rewindFailedSegments.append(rewindInfo[dbid].targetSegment)
return rewindFailedSegments
def remove_postmaster_pid_from_remotehost(self, host, datadir):
cmd = base.Command(name = 'remove the postmaster.pid file',
cmdStr = 'rm -f %s/postmaster.pid' % datadir,
ctxt=gp.REMOTE, remoteHost = host)
cmd.run()
return_code = cmd.get_return_code()
if return_code != 0:
raise ExecutionError("Failed while trying to remove postmaster.pid.", cmd)
def checkForPortAndDirectoryConflicts(self, gpArray):
for hostName, segmentArr in GpArray.getSegmentsByHostName(gpArray.getDbList()).items():
usedPorts = {}
usedDataDirectories = {}
for segment in segmentArr:
port = segment.getSegmentPort()
dbid = segment.getSegmentDbId()
if port in usedPorts:
raise Exception(
"Segment dbid's %s and %s on host %s cannot have the same port %s." %
(dbid, usedPorts.get(port), hostName, port))
usedPorts[port] = dbid
# check for directory conflict; could improve this by reporting nicer the conflicts
path = segment.getSegmentDataDirectory()
if path in usedDataDirectories:
raise Exception(
"Segment dbid's %s and %s on host %s cannot have the same data directory '%s'." %
(dbid, usedDataDirectories.get(path), hostName, path))
usedDataDirectories[path] = dbid
def _join_and_show_segment_progress(self, cmds, inplace=False, outfile=sys.stdout, interval=1):
written = False
def print_progress():
if written and inplace:
outfile.write("\x1B[%dA" % len(cmds))
output = []
for cmd in cmds:
try:
cmd_str = cmd.cmdStr
cmd.run(validateAfter=True)
cmd.cmdStr = cmd_str
results = cmd.get_results().stdout.rstrip()
except ExecutionError:
lines = cmd.get_results().stderr.splitlines()
if lines:
results = lines[0]
else:
results = ''
output.append("%s (dbid %d): %s" % (cmd.remoteHost, cmd.dbid, results))
if inplace:
output.append("\x1B[K")
output.append("\n")
outfile.write("".join(output))
outfile.flush()
while not self.__pool.join(interval):
print_progress()
written = True
print_progress()
def __getProgressAndRemoveCmds(self, progressFile, targetSegmentDbId, targetHostname):
progressCmd = None
if self.__progressMode != GpMirrorListToBuild.Progress.NONE:
progressCmd = GpMirrorListToBuild.ProgressCommand("tail the last line of the file",
"set -o pipefail; touch -a {0}; tail -1 {0} | tr '\\r' '\\n' | tail -1".format(
pipes.quote(progressFile)),
targetSegmentDbId,
progressFile,
ctxt=base.REMOTE,
remoteHost=targetHostname)
removeCmd = base.Command("remove file",
"rm -f %s" % pipes.quote(progressFile),
ctxt=base.REMOTE,
remoteHost=targetHostname)
return progressCmd, removeCmd
def __runWaitAndCheckWorkerPoolForErrorsAndClear(self, cmds, actionVerb, suppressErrorCheck=False,
progressCmds=[]):
for cmd in cmds:
self.__pool.addCommand(cmd)
if self.__quiet:
self.__pool.join()
elif progressCmds:
self._join_and_show_segment_progress(progressCmds,
inplace=self.__progressMode == GpMirrorListToBuild.Progress.INPLACE)
else:
base.join_and_indicate_progress(self.__pool)
if not suppressErrorCheck:
self.__pool.check_results()
completedRecoveryCmds = list(set(self.__pool.getCompletedItems()) & set(cmds))
self.__pool.empty_completed_items()
return completedRecoveryCmds
def __copyFiles(self, srcDir, destDir, fileNames):
for name in fileNames:
cmd = gp.LocalCopy("copy file for segment", srcDir + "/" + name, destDir + "/" + name)
cmd.run(validateAfter=True)
def __createEmptyDirectories(self, dir, newDirectoryNames):
for name in newDirectoryNames:
subDir = os.path.join(dir, name)
unix.MakeDirectory("create blank directory for segment", subDir).run(validateAfter=True)
unix.Chmod.local('set permissions on blank dir', subDir, '0700')
def __copySegmentDirectories(self, gpEnv, gpArray, directives):
if len(directives) == 0:
return
srcSegments = []
destSegments = []
isTargetReusedLocation = []
timeStamp = datetime.datetime.today().strftime('%Y%m%d_%H%M%S')
for directive in directives:
srcSegment = directive.getSrcSegment()
destSegment = directive.getDestSegment()
destSegment.primaryHostname = srcSegment.getSegmentHostName()
destSegment.primarySegmentPort = srcSegment.getSegmentPort()
destSegment.progressFile = '%s/pg_basebackup.%s.dbid%s.out' % (gplog.get_logger_dir(),
timeStamp,
destSegment.getSegmentDbId())
srcSegments.append(srcSegment)
destSegments.append(destSegment)
isTargetReusedLocation.append(directive.isTargetReusedLocation())
destSegmentByHost = GpArray.getSegmentsByHostName(destSegments)
newSegmentInfo = gp.ConfigureNewSegment.buildSegmentInfoForNewSegment(destSegments, isTargetReusedLocation)
def createConfigureNewSegmentCommand(hostName, cmdLabel, validationOnly):
segmentInfo = newSegmentInfo[hostName]
checkNotNone("segmentInfo for %s" % hostName, segmentInfo)
return gp.ConfigureNewSegment(cmdLabel,
segmentInfo,
gplog.get_logger_dir(),
newSegments=True,
verbose=gplog.logging_is_verbose(),
batchSize=self.__parallelDegree,
ctxt=gp.REMOTE,
remoteHost=hostName,
validationOnly=validationOnly,
forceoverwrite=self.__forceoverwrite)
self.__logger.info('Validating remote directories')
cmds = []
for hostName in list(destSegmentByHost.keys()):
cmds.append(createConfigureNewSegmentCommand(hostName, 'validate blank segments', True))
for cmd in cmds:
self.__pool.addCommand(cmd)
if self.__quiet:
self.__pool.join()
else:
base.join_and_indicate_progress(self.__pool)
validationErrors = []
for item in self.__pool.getCompletedItems():
results = item.get_results()
if not results.wasSuccessful():
if results.rc == 1:
lines = results.stderr.split("\n")
for line in lines:
if len(line.strip()) > 0:
validationErrors.append("Validation failure on host %s %s" % (item.remoteHost, line))
else:
validationErrors.append(str(item))
self.__pool.empty_completed_items()
if validationErrors:
raise ExceptionNoStackTraceNeeded("\n" + ("\n".join(validationErrors)))
self.__logger.info('Configuring new segments')
cmds = []
progressCmds = []
removeCmds= []
for hostName in list(destSegmentByHost.keys()):
for segment in destSegmentByHost[hostName]:
progressCmd, removeCmd = self.__getProgressAndRemoveCmds(segment.progressFile,
segment.getSegmentDbId(),
hostName)
removeCmds.append(removeCmd)
if progressCmd:
progressCmds.append(progressCmd)
cmds.append(
createConfigureNewSegmentCommand(hostName, 'configure blank segments', False))
self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "unpacking basic segment directory",
suppressErrorCheck=False,
progressCmds=progressCmds)
self.__runWaitAndCheckWorkerPoolForErrorsAndClear(removeCmds, "removing pg_basebackup progress logfiles",
suppressErrorCheck=False)
for srcSeg in srcSegments:
for destSeg in destSegments:
if srcSeg.content == destSeg.content:
src_dump_dir = os.path.join(srcSeg.getSegmentDataDirectory(), 'db_dumps')
cmd = base.Command('check existence of db_dumps directory', 'ls %s' % (src_dump_dir),
ctxt=base.REMOTE, remoteHost=destSeg.getSegmentAddress())
cmd.run()
if cmd.results.rc == 0:
cmd = Scp('copy db_dumps from old segment to new segment',
os.path.join(srcSeg.getSegmentDataDirectory(), 'db_dumps*', '*'),
os.path.join(destSeg.getSegmentDataDirectory(), 'db_dumps'),
srcSeg.getSegmentAddress(),
destSeg.getSegmentAddress(),
recursive=True)
cmd.run(validateAfter=True)
break
def _get_running_postgres_segments(self, segments):
running_segments = []
for seg in segments:
datadir = self.dereference_remote_symlink(seg.getSegmentDataDirectory(), seg.getSegmentHostName())
pid = get_pid_from_remotehost(seg.getSegmentHostName(), datadir)
if pid is not None:
if check_pid_on_remotehost(pid, seg.getSegmentHostName()):
if is_pid_postmaster(datadir, pid, seg.getSegmentHostName()):
running_segments.append(seg)
else:
self.__logger.info("Skipping to stop segment %s on host %s since it is not a postgres process" % (
seg.getSegmentDataDirectory(), seg.getSegmentHostName()))
else:
self.__logger.debug("Skipping to stop segment %s on host %s since process with pid %s is not running" % (
seg.getSegmentDataDirectory(), seg.getSegmentHostName(), pid))
else:
self.__logger.debug("Skipping to stop segment %s on host %s since pid could not be found" % (
seg.getSegmentDataDirectory(), seg.getSegmentHostName()))
return running_segments
def dereference_remote_symlink(self, datadir, host):
cmdStr = """python -c 'import os; print(os.path.realpath("%s"))'""" % datadir
cmd = base.Command('dereference a symlink on a remote host', cmdStr=cmdStr, ctxt=base.REMOTE, remoteHost=host)
cmd.run()
results = cmd.get_results()
if results.rc != 0:
self.__logger.warning('Unable to determine if %s is symlink. Assuming it is not symlink' % (datadir))
return datadir
return results.stdout.strip()
def __ensureStopped(self, gpEnv, directives):
if len(directives) == 0:
return
self.__logger.info("Ensuring %d failed segment(s) are stopped" % (len(directives)))
segments = [d.getSegment() for d in directives]
segments = self._get_running_postgres_segments(segments)
segmentByHost = GpArray.getSegmentsByHostName(segments)
cmds = []
for hostName, segments in segmentByHost.items():
cmd = gp.GpSegStopCmd("remote segment stop on host '%s'" % hostName,
gpEnv.getGpHome(), gpEnv.getGpVersion(),
mode='fast', dbs=segments, verbose=gplog.logging_is_verbose(),
ctxt=base.REMOTE, remoteHost=hostName)
cmds.append(cmd)
#
self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "stopping segments", suppressErrorCheck=True)
def __ensureMarkedDown(self, gpEnv, toEnsureMarkedDown):
wait_time = 60 * 30 # Wait up to 30 minutes to handle very large, busy
# clusters that may have faults. In most cases the
# actual time to wait will be small and this operation
# is only needed when moving mirrors that are up and
# needed to be stopped, an uncommon operation.
dburl = dbconn.DbURL(port=gpEnv.getCoordinatorPort(), dbname='template1')
time_elapsed = 0
seg_up_count = 0
initial_seg_up_count = len(toEnsureMarkedDown)
last_seg_up_count = initial_seg_up_count
if initial_seg_up_count == 0:
# Nothing to wait on
return
self.__logger.info("Waiting for segments to be marked down.")
self.__logger.info("This may take up to %d seconds on large clusters." % wait_time)
# wait for all needed segments to be marked down by the prober. We'll wait
while wait_time > time_elapsed:
seg_up_count = 0
current_gparray = GpArray.initFromCatalog(dburl, True)
seg_db_map = current_gparray.getSegDbMap()
for segdb in toEnsureMarkedDown:
if segdb.getSegmentDbId() in seg_db_map and seg_db_map[segdb.getSegmentDbId()].isSegmentUp():
seg_up_count += 1
if seg_up_count == 0:
break
else:
if last_seg_up_count != seg_up_count:
print("\n", end=' ')
self.__logger.info("%d of %d segments have been marked down." %
(initial_seg_up_count - seg_up_count, initial_seg_up_count))
last_seg_up_count = seg_up_count
for _i in range(1, 5):
time.sleep(1)
sys.stdout.write(".")
sys.stdout.flush()
time_elapsed += 5
if seg_up_count == 0:
print("\n", end=' ')
self.__logger.info("%d of %d segments have been marked down." %
(initial_seg_up_count, initial_seg_up_count))
else:
raise Exception("%d segments were not marked down by FTS" % seg_up_count)
def __cleanUpSegmentDirectories(self, directives):
if len(directives) == 0:
return
self.__logger.info("Cleaning files from %d segment(s)" % (len(directives)))
segments = [d.getSegment() for d in directives]
segmentByHost = GpArray.getSegmentsByHostName(segments)
cmds = []
for hostName, segments in segmentByHost.items():
cmds.append(gp.GpCleanSegmentDirectories("clean segment directories on %s" % hostName,
segments, gp.REMOTE, hostName))
self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "cleaning existing directories")
def __createStartSegmentsOp(self, gpEnv):
return startSegments.StartSegmentsOperation(self.__pool, self.__quiet,
gpEnv.getGpVersion(),
gpEnv.getGpHome(), gpEnv.getCoordinatorDataDir()
)
def __updateGpIdFile(self, gpEnv, gpArray, segments):
segmentByHost = GpArray.getSegmentsByHostName(segments)
newSegmentInfo = gp.ConfigureNewSegment.buildSegmentInfoForNewSegment(segments)
cmds = []
for hostName in list(segmentByHost.keys()):
segmentInfo = newSegmentInfo[hostName]
checkNotNone("segmentInfo for %s" % hostName, segmentInfo)
cmd = gp.ConfigureNewSegment("update gpid file",
segmentInfo,
gplog.get_logger_dir(),
newSegments=False,
verbose=gplog.logging_is_verbose(),
batchSize=self.__parallelDegree,
ctxt=gp.REMOTE,
remoteHost=hostName,
validationOnly=False,
writeGpIdFileOnly=True)
cmds.append(cmd)
self.__runWaitAndCheckWorkerPoolForErrorsAndClear(cmds, "writing updated gpid files")
def __startAll(self, gpEnv, gpArray, segments):
era = read_era(gpEnv.getCoordinatorDataDir(), logger=self.__logger)
segmentStartResult = self.__createStartSegmentsOp(gpEnv).startSegments(gpArray, segments,
startSegments.START_AS_MIRRORLESS,
era)
start_all_successfull = len(segmentStartResult.getFailedSegmentObjs()) == 0
for failure in segmentStartResult.getFailedSegmentObjs():
failedSeg = failure.getSegment()
failureReason = failure.getReason()
self.__logger.warn(
"Failed to start segment. The fault prober will shortly mark it as down. Segment: %s: REASON: %s" % (
failedSeg, failureReason))
return start_all_successfull
class GpCleanupSegmentDirectoryDirective:
def __init__(self, segment):
checkNotNone("segment", segment)
self.__segment = segment
def getSegment(self):
return self.__segment
class GpStopSegmentDirectoryDirective:
def __init__(self, segment):
checkNotNone("segment", segment)
self.__segment = segment
def getSegment(self):
return self.__segment
class GpCopySegmentDirectoryDirective:
def __init__(self, source, dest, isTargetReusedLocation):
checkNotNone("source", source)
checkNotNone("dest", dest)
self.__source = source
self.__dest = dest
self.__isTargetReusedLocation = isTargetReusedLocation
def getSrcSegment(self):
return self.__source
def getDestSegment(self):
return self.__dest
def isTargetReusedLocation(self):
return self.__isTargetReusedLocation
| true | true |
f71eeb544fc08fe949ef91f0ff9a44660507c8ee | 6,583 | py | Python | plycompiler/parse.py | kevinaangstadt/pushdown | 8b085eea189a830ae73ea1c3b0de34d105010231 | [
"BSD-3-Clause"
] | 1 | 2017-12-13T15:15:21.000Z | 2017-12-13T15:15:21.000Z | plycompiler/parse.py | kevinaangstadt/pushdown | 8b085eea189a830ae73ea1c3b0de34d105010231 | [
"BSD-3-Clause"
] | null | null | null | plycompiler/parse.py | kevinaangstadt/pushdown | 8b085eea189a830ae73ea1c3b0de34d105010231 | [
"BSD-3-Clause"
] | null | null | null | import ply.yacc as yacc
# get the token map from the lexer
from lex import tokens
import sys
import pushdown
Pushdown = pushdown.Pushdown
State = pushdown.State
Rule = pushdown.Rule
ParseState = pushdown.ParseState
Production = pushdown.Production
Shift = pushdown.Shift
Reduce = pushdown.Reduce
# Grammar
#
# file := GRAMMAR NEWLINE rulelist NEWLINE TLIST NEWLINE tlist NEWLINE
# NTLIST NEWLINE ntlist NEWLINE PMETHOD NEWLINE statelist
# rulelist := rule rulelist
# | empty
# tlist := tterm tlist
# | empty
# ntlist := ntterm ntlist
# | empty
# statelist := state statelist
# | empty
# rule := RULE production
# tterm := TERMINAL COLON numbers
# ntterm := NONTERMINAL COLON numbers
# state := STATE NEWLINE srulelist NEWLINE trules NEWLINE ntrules NEWLINE
# production := NONTERMINAL RARROW rhs
# numbers := INT numbers
# | empty
# srulelist := srule
# | empty
# trules := trule trules
# | empty
# ntrules := ntrule ntrules
# | empty
# rhs := exp erhs
# erhs := exp erhs
# | empty
# srule := LPAREN INT RPAREN production
# trule := TERMINAL operation
# ntrule := NONTERMINAL operation
# exp := DOT | TERMINAL | NONTERMINAL
# operation := SHIFT | REDUCE LPAREN production RPAREN
terminals = ["<empty>"]
non_terminals = []
def p_file(p):
    '''file : anythinglist GRAMMAR DNEWLINE rulelist TLIST DNEWLINE tlist NTLIST DNEWLINE ntlist PMETHOD DNEWLINE statelist'''
    # p[7] / p[10] are lists of (symbol-name, state-number-list) pairs
    terms = [x for (x, _) in p[7]]
    nterms = [x for (x, _) in p[10]]
    print "non_terminals:", non_terminals
    print "terminals:", terminals
    # Build the automaton, then attach rules, states and symbol tables.
    p[0] = Pushdown(p[13], terms, nterms)
    for r in p[4]:
        p[0].add_rule(r)
    for s in p[13]:
        p[0].add_state(s)
    for k, v in p[7]:
        p[0].add_t(k, v)
    for k, v in p[10]:
        p[0].add_nt(k, v)
# ignore everything before we see the start of the GRAMMAR
def p_anything(p):
    ''' anything : RULE
        | STATE
        | TLIST
        | NTLIST
        | PMETHOD
        | SHIFT
        | REDUCE
        | RARROW
        | IDENT
        | INT
        | COLON
        | LPAREN
        | RPAREN
        | DOT
        | NEWLINE
        | DNEWLINE'''
    # Swallow any token appearing before the GRAMMAR header; produces no value.
    pass
# We'll simplify things by having a single rule for all our list productions
def p_list(p):
    '''statelist : state statelist
                 | state NEWLINE statelist
                 | empty
       numbers : INT numbers
               | empty
       trules : trule NEWLINE trules
              | DNEWLINE
       ntrules : ntrule NEWLINE ntrules
               | empty
       erhs : exp erhs
            | empty
       anythinglist : anything anythinglist
                    | empty'''
    # Shared handler for every possibly-empty list production:
    # cons the head onto whichever slot carries the tail list.
    if len(p) == 2:
        p[0] = []
    else:
        tail = p[3] if len(p) == 4 else p[2]
        p[0] = [p[1]] + tail
def p_non_empty_list(p):
    '''tlist : tterm NEWLINE tlist
             | tterm DNEWLINE
       ntlist : ntterm NEWLINE ntlist
              | ntterm DNEWLINE
       srulelist : srule NEWLINE srulelist
                 | srule DNEWLINE
       sactions : action NEWLINE sactions
                | action DNEWLINE'''
    # Shared handler for one-or-more lists: the DNEWLINE form (3 symbols)
    # terminates the list, otherwise recurse through the tail in p[3].
    tail = [] if len(p) == 3 else p[3]
    p[0] = [p[1]] + tail
# def p_rulelist(p):
# '''rulelist : ruleset DNEWLINE rulelist
# | empty'''
# if len(p) == 2:
# p[0] = list()
# else:
# p[0] = p[1] + p[3]
# def p_forced_list(p):
# '''trules : trule etrules
# ntrules : ntrule entrules'''
# p[0] = [p[1]] + p[2]
def p_ruleset(p):
    '''rulelist : rule NEWLINE rulelist
                | rule DNEWLINE'''
    # DNEWLINE form (3 symbols) ends the rule list; otherwise recurse.
    tail = p[3] if len(p) == 4 else []
    p[0] = [p[1]] + tail
def p_rule(p):
    '''rule : RULE production'''
    # A Rule pairs the rule number with the production's lhs and rhs.
    prod = p[2]
    p[0] = Rule(p[1], prod._lhs, prod._rhs)
def p_tterm(p):
    '''tterm : IDENT COLON numbers'''
    # Register the terminal in the module-wide list and yield
    # a (name, state-number-list) pair for the enclosing tlist.
    global terminals
    name = p[1]
    terminals.append(name)
    p[0] = (name, p[3])
def p_ntterm(p):
    '''ntterm : IDENT COLON numbers'''
    # Register the nonterminal in the module-wide list and yield
    # a (name, state-number-list) pair for the enclosing ntlist.
    global non_terminals
    name = p[1]
    non_terminals.append(name)
    p[0] = (name, p[3])
def p_state(p):
    '''state : STATE DNEWLINE srulelist sactions sactions
             | STATE DNEWLINE srulelist sactions
             | STATE DNEWLINE srulelist DNEWLINE'''
    # Gather (symbol, operation) pairs from the one or two action groups,
    # dropping masked actions (operation is None). In the DNEWLINE-only
    # form p[4] is a token string, not a list, so it is skipped.
    pairs = []
    if isinstance(p[4], list):
        pairs.extend([(sym, op) for (sym, op) in p[4] if op is not None])
    if len(p) >= 6:
        pairs.extend([(sym, op) for (sym, op) in p[5] if op is not None])
    # Split the actions into terminal vs. nonterminal transition tables.
    t_table = {}
    nt_table = {}
    for sym, op in pairs:
        (nt_table if sym in non_terminals else t_table)[sym] = op
    p[0] = State(p[1], p[3], t_table, nt_table)
# def p_state_no_t(p):
# '''state : STATE dnewline srulelist dnewline ntrules NEWLINE'''
# # make a dict of t- and nt-transitions
# t = dict()
# nt = dict()
# for k, v in p[6]:
# nt[k] = v
# p[0] = State(p[1], p[2], t, nt)
#
#
# def p_state_no_nt(p):
# '''state : STATE dnewline srulelist NEWLINE trules NEWLINE'''
# # make a dict of t- and nt-transitions
# t = dict()
# nt = dict()
# for k, v in p[5]:
# t[k] = v
# p[0] = State(p[1], p[2], t, nt)
def p_production(p):
    '''production : IDENT RARROW rhs'''
    # lhs identifier plus the list of rhs symbols built by p_rhs.
    p[0] = Production(p[1], p[3])
def p_rhs(p):
    '''rhs : exp erhs'''
    # NOTE: the docstring above is the PLY grammar and must not be edited.
    # Cons the leading symbol onto the (possibly empty) remainder of the rhs.
    symbols = [p[1]]
    symbols.extend(p[2])
    p[0] = symbols
def p_srule(p):
    '''srule : LPAREN INT RPAREN production'''
    # An LR item: rule number plus the dot's index within the production rhs.
    p[0] = ParseState(p[2], p[4]._rhs.index('.'))
def p_action(p):
    '''trule : IDENT operation
       ntrule : IDENT operation
       action : IDENT operation
              | BANG IDENT LBRACKET operation RBRACKET'''
    # NOTE: the docstring above is the PLY grammar and must not be edited.
    # The bracketed BANG form marks a discarded (conflicting) action and is
    # encoded as (symbol, None); the plain form keeps its operation.
    p[0] = (p[2], None) if len(p) == 6 else (p[1], p[2])
def p_exp(p):
    '''exp : DOT
           | IDENT'''
    # Both alternatives pass the token text through unchanged.
    p[0] = p[1]
def p_operation(p):
    '''operation : SHIFT
                 | REDUCE LPAREN production RPAREN'''
    # A lone SHIFT token becomes a Shift action; the parenthesised form is a
    # reduction by the enclosed production (dot index gives the item position).
    if len(p) == 2:
        p[0] = Shift(p[1])
    else:
        p[0] = Reduce(ParseState(p[1], position=p[3]._rhs.index('.')))
# Error rule for syntax errors
def p_error(p):
    # PLY calls this with p=None when input is exhausted; anything else is a
    # genuine syntax error, reported and treated as fatal.
    # (Python 2 print statements — this module targets Python 2.)
    if not p:
        print "End of File!"
        return
    print "Syntax error at token", p.type, "on line", p.lineno
    sys.exit(1)
def p_empty(p):
    '''empty : '''
    # Deliberately produces nothing: p[0] stays None for the empty match.
    pass
parser = yacc.yacc()
| 23.765343 | 127 | 0.515874 | import ply.yacc as yacc
from lex import tokens
import sys
import pushdown
Pushdown = pushdown.Pushdown
State = pushdown.State
Rule = pushdown.Rule
ParseState = pushdown.ParseState
Production = pushdown.Production
Shift = pushdown.Shift
Reduce = pushdown.Reduce
terminals = ["<empty>"]
non_terminals = []
def p_file(p):
    '''file : anythinglist GRAMMAR DNEWLINE rulelist TLIST DNEWLINE tlist NTLIST DNEWLINE ntlist PMETHOD DNEWLINE statelist'''
    # Assemble the Pushdown automaton from the parsed sections.
    # p[7]/p[10] are lists of (name, numbers) pairs; strip to names first.
    # (Python 2 print statements — debug output of the global registries.)
    terms = [x for (x, _) in p[7]]
    nterms = [x for (x, _) in p[10]]
    print "non_terminals:", non_terminals
    print "terminals:", terminals
    p[0] = Pushdown(p[13], terms, nterms)
    # Populate rules, states, and the (name -> numbers) transition tables.
    for r in p[4]:
        p[0].add_rule(r)
    for s in p[13]:
        p[0].add_state(s)
    for k, v in p[7]:
        p[0].add_t(k, v)
    for k, v in p[10]:
        p[0].add_nt(k, v)
def p_anything(p):
    ''' anything : RULE
                 | STATE
                 | TLIST
                 | NTLIST
                 | PMETHOD
                 | SHIFT
                 | REDUCE
                 | RARROW
                 | IDENT
                 | INT
                 | COLON
                 | LPAREN
                 | RPAREN
                 | DOT
                 | NEWLINE
                 | DNEWLINE'''
    # Matches any single token; used by anythinglist (see p_list) to skip an
    # arbitrary prologue before the GRAMMAR section. Produces no value.
    pass
def p_list(p):
    '''statelist : state statelist
                 | state NEWLINE statelist
                 | empty
       numbers : INT numbers
               | empty
       trules : trule NEWLINE trules
              | DNEWLINE
       ntrules : ntrule NEWLINE ntrules
               | empty
       erhs : exp erhs
            | empty
       anythinglist : anything anythinglist
                    | empty'''
    # NOTE: the docstring above is the PLY grammar and must not be edited.
    # Generic (possibly empty) list builder: the "empty" alternative yields [],
    # and the recursive alternatives cons p[1] onto the tail, which sits in
    # p[3] when an intervening NEWLINE was matched and in p[2] otherwise.
    if len(p) == 2:
        p[0] = []
    else:
        tail = p[3] if len(p) == 4 else p[2]
        p[0] = [p[1]] + tail
def p_non_empty_list(p):
'''tlist : tterm NEWLINE tlist
| tterm DNEWLINE
ntlist : ntterm NEWLINE ntlist
| ntterm DNEWLINE
srulelist : srule NEWLINE srulelist
| srule DNEWLINE
sactions : action NEWLINE sactions
| action DNEWLINE'''
if len(p) == 3:
p[0] = [p[1]]
else:
p[0] = [p[1]] + p[3]
# def p_rulelist(p):
# '''rulelist : ruleset DNEWLINE rulelist
# | empty'''
# if len(p) == 2:
# p[0] = list()
# else:
# p[0] = p[1] + p[3]
# def p_forced_list(p):
# '''trules : trule etrules
# ntrules : ntrule entrules'''
# p[0] = [p[1]] + p[2]
def p_ruleset(p):
'''rulelist : rule NEWLINE rulelist
| rule DNEWLINE'''
if len(p) == 3:
p[0] = [p[1]]
else:
p[0] = [p[1]] + p[3]
def p_rule(p):
'''rule : RULE production'''
p[0] = Rule(p[1], p[2]._lhs, p[2]._rhs)
def p_tterm(p):
'''tterm : IDENT COLON numbers'''
global terminals
terminals.append(p[1])
p[0] = (p[1], p[3])
def p_ntterm(p):
'''ntterm : IDENT COLON numbers'''
global non_terminals
non_terminals.append(p[1])
p[0] = (p[1], p[3])
def p_state(p):
'''state : STATE DNEWLINE srulelist sactions sactions
| STATE DNEWLINE srulelist sactions
| STATE DNEWLINE srulelist DNEWLINE'''
actions = []
if isinstance(p[4], list):
actions.extend([(x, y) for (x, y) in p[4] if y is not None])
if len(p) >= 6:
actions.extend([(x, y) for (x, y) in p[5] if y is not None])
# make a dict of t- and nt-transitions
t = dict()
nt = dict()
for k, v in actions:
if k in non_terminals:
nt[k] = v
else:
t[k] = v
p[0] = State(p[1], p[3], t, nt)
# def p_state_no_t(p):
# '''state : STATE dnewline srulelist dnewline ntrules NEWLINE'''
# # make a dict of t- and nt-transitions
# t = dict()
# nt = dict()
# for k, v in p[6]:
# nt[k] = v
# p[0] = State(p[1], p[2], t, nt)
#
#
# def p_state_no_nt(p):
# '''state : STATE dnewline srulelist NEWLINE trules NEWLINE'''
# # make a dict of t- and nt-transitions
# t = dict()
# nt = dict()
# for k, v in p[5]:
# t[k] = v
# p[0] = State(p[1], p[2], t, nt)
def p_production(p):
'''production : IDENT RARROW rhs'''
p[0] = Production(p[1], p[3])
def p_rhs(p):
'''rhs : exp erhs'''
p[0] = [p[1]] + p[2]
def p_srule(p):
'''srule : LPAREN INT RPAREN production'''
p[0] = ParseState(p[2], p[4]._rhs.index('.'))
def p_action(p):
'''trule : IDENT operation
ntrule : IDENT operation
action : IDENT operation
| BANG IDENT LBRACKET operation RBRACKET'''
if len(p) == 6:
p[0] = (p[2], None)
else:
p[0] = (p[1], p[2])
def p_exp(p):
'''exp : DOT
| IDENT'''
p[0] = p[1]
def p_operation(p):
'''operation : SHIFT
| REDUCE LPAREN production RPAREN'''
if len(p) == 2:
p[0] = Shift(p[1])
else:
p[0] = Reduce(ParseState(p[1], position=p[3]._rhs.index('.')))
# Error rule for syntax errors
def p_error(p):
if not p:
print "End of File!"
return
print "Syntax error at token", p.type, "on line", p.lineno
sys.exit(1)
def p_empty(p):
'''empty : '''
pass
parser = yacc.yacc()
| false | true |
f71eecf30391d625a435cc49f57cec598c53b612 | 42,870 | py | Python | great_expectations/execution_engine/sqlalchemy_execution_engine.py | joeltone/great_expectations | ea9d68c33d955c44e3da619b627fd66f070909ff | [
"Apache-2.0"
] | null | null | null | great_expectations/execution_engine/sqlalchemy_execution_engine.py | joeltone/great_expectations | ea9d68c33d955c44e3da619b627fd66f070909ff | [
"Apache-2.0"
] | null | null | null | great_expectations/execution_engine/sqlalchemy_execution_engine.py | joeltone/great_expectations | ea9d68c33d955c44e3da619b627fd66f070909ff | [
"Apache-2.0"
] | null | null | null | import copy
import datetime
import logging
import traceback
import warnings
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
from urllib.parse import urlparse
from great_expectations._version import get_versions # isort:skip
__version__ = get_versions()["version"] # isort:skip
del get_versions # isort:skip
from great_expectations.core import IDDict
from great_expectations.core.batch import BatchMarkers, BatchSpec
from great_expectations.core.batch_spec import (
RuntimeQueryBatchSpec,
SqlAlchemyDatasourceBatchSpec,
)
from great_expectations.core.util import convert_to_json_serializable
from great_expectations.data_context.types.base import ConcurrencyConfig
from great_expectations.exceptions import (
DatasourceKeyPairAuthBadPassphraseError,
ExecutionEngineError,
GreatExpectationsError,
InvalidBatchSpecError,
InvalidConfigError,
)
from great_expectations.execution_engine import ExecutionEngine
from great_expectations.execution_engine.execution_engine import MetricDomainTypes
from great_expectations.execution_engine.sqlalchemy_batch_data import (
SqlAlchemyBatchData,
)
from great_expectations.expectations.row_conditions import parse_condition_to_sqlalchemy
from great_expectations.util import filter_properties_dict, import_library_module
from great_expectations.validator.metric_configuration import MetricConfiguration
logger = logging.getLogger(__name__)
# Optional-dependency imports: each block degrades gracefully (names bound to
# None) when the corresponding driver/dialect package is not installed, so the
# module stays importable and dialect checks below can test for None.
try:
    import sqlalchemy as sa
except ImportError:
    sa = None
try:
    from sqlalchemy.engine import reflection
    from sqlalchemy.engine.default import DefaultDialect
    from sqlalchemy.engine.url import URL
    from sqlalchemy.exc import OperationalError
    from sqlalchemy.sql import Selectable
    from sqlalchemy.sql.elements import TextClause, quoted_name
except ImportError:
    # Placeholders keep later module-level references valid without sqlalchemy.
    reflection = None
    DefaultDialect = None
    Selectable = None
    TextClause = None
    quoted_name = None
    OperationalError = None
try:
    import psycopg2
    import sqlalchemy.dialects.postgresql.psycopg2 as sqlalchemy_psycopg2
except (ImportError, KeyError):
    sqlalchemy_psycopg2 = None
try:
    import sqlalchemy_redshift.dialect
except ImportError:
    sqlalchemy_redshift = None
try:
    import snowflake.sqlalchemy.snowdialect
    if sa:
        # Sometimes "snowflake-sqlalchemy" fails to self-register in certain environments, so we do it explicitly.
        # (see https://stackoverflow.com/questions/53284762/nosuchmoduleerror-cant-load-plugin-sqlalchemy-dialectssnowflake)
        sa.dialects.registry.register("snowflake", "snowflake.sqlalchemy", "dialect")
except (ImportError, KeyError, AttributeError):
    snowflake = None
try:
    import pybigquery.sqlalchemy_bigquery
    ###
    # NOTE: 20210816 - jdimatteo: A convention we rely on is for SqlAlchemy dialects
    # to define an attribute "dialect". A PR has been submitted to fix this upstream
    # with https://github.com/googleapis/python-bigquery-sqlalchemy/pull/251. If that
    # fix isn't present, add this "dialect" attribute here:
    if not hasattr(pybigquery.sqlalchemy_bigquery, "dialect"):
        pybigquery.sqlalchemy_bigquery.dialect = (
            pybigquery.sqlalchemy_bigquery.BigQueryDialect
        )
    # Sometimes "pybigquery.sqlalchemy_bigquery" fails to self-register in Azure (our CI/CD pipeline) in certain cases, so we do it explicitly.
    # (see https://stackoverflow.com/questions/53284762/nosuchmoduleerror-cant-load-plugin-sqlalchemy-dialectssnowflake)
    sa.dialects.registry.register(
        "bigquery", "pybigquery.sqlalchemy_bigquery", "dialect"
    )
    try:
        getattr(pybigquery.sqlalchemy_bigquery, "INTEGER")
        bigquery_types_tuple = None
    except AttributeError:
        # In older versions of the pybigquery driver, types were not exported, so we use a hack
        logger.warning(
            "Old pybigquery driver version detected. Consider upgrading to 0.4.14 or later."
        )
        from collections import namedtuple
        # bigquery_types_tuple stands in for the missing exported type names;
        # _get_dialect_type_module returns it when not None.
        BigQueryTypes = namedtuple(
            "BigQueryTypes", sorted(pybigquery.sqlalchemy_bigquery._type_map)
        )
        bigquery_types_tuple = BigQueryTypes(**pybigquery.sqlalchemy_bigquery._type_map)
except (ImportError, AttributeError):
    bigquery_types_tuple = None
    pybigquery = None
def _get_dialect_type_module(dialect):
    """Resolve the module/namespace holding SQL type classes for ``dialect``.

    Falls back to the top-level ``sa`` types when no dialect is given, and
    special-cases Redshift (types live on ``dialect.sa``) and older BigQuery
    drivers (patched ``bigquery_types_tuple``); otherwise the dialect itself
    is returned.
    """
    if dialect is None:
        logger.warning(
            "No sqlalchemy dialect found; relying in top-level sqlalchemy types."
        )
        return sa
    # Redshift does not (yet) export types to top level; only recognize base SA types.
    # The try/except also guards against sqlalchemy_redshift being None.
    try:
        if isinstance(dialect, sqlalchemy_redshift.dialect.RedshiftDialect):
            return dialect.sa
    except (TypeError, AttributeError):
        pass
    # Bigquery works with newer versions, but use a patch if we had to define
    # bigquery_types_tuple (older pybigquery drivers).
    try:
        is_patched_bigquery = bigquery_types_tuple is not None and isinstance(
            dialect,
            pybigquery.sqlalchemy_bigquery.BigQueryDialect,
        )
    except (TypeError, AttributeError):
        is_patched_bigquery = False
    if is_patched_bigquery:
        return bigquery_types_tuple
    return dialect
class SqlAlchemyExecutionEngine(ExecutionEngine):
    def __init__(
        self,
        name=None,
        credentials=None,
        data_context=None,
        engine=None,
        connection_string=None,
        url=None,
        batch_data_dict=None,
        create_temp_table=True,
        concurrency: Optional[ConcurrencyConfig] = None,
        **kwargs,  # These will be passed as optional parameters to the SQLAlchemy engine, **not** the ExecutionEngine
    ):
        """Builds a SqlAlchemyExecutionEngine, using a provided connection string/url/engine/credentials to access the
        desired database. Also initializes the dialect to be used and configures usage statistics.
        Args:
            name (str): \
                The name of the SqlAlchemyExecutionEngine
            credentials: \
                If the Execution Engine is not provided, the credentials can be used to build the Execution
                Engine. If the Engine is provided, it will be used instead
            data_context (DataContext): \
                An object representing a Great Expectations project that can be used to access Expectation
                Suites and the Project Data itself
            engine (Engine): \
                A SqlAlchemy Engine used to set the SqlAlchemyExecutionEngine being configured, useful if an
                Engine has already been configured and should be reused. Will override Credentials
                if provided.
            connection_string (string): \
                If neither the engines nor the credentials have been provided, a connection string can be used
                to access the data. This will be overridden by both the engine and credentials if those are
                provided.
            url (string): \
                If neither the engines, the credentials, nor the connection_string have been provided,
                a url can be used to access the data. This will be overridden by all other configuration
                options if any are provided.
            batch_data_dict (dict): \
                Optional mapping of batch ids to already-loaded batch data; forwarded to the base
                ExecutionEngine.
            create_temp_table (bool): \
                Stored on the instance as _create_temp_table; presumably consumed when staging batch
                data in temporary tables (not used within this method -- confirm against callers).
            concurrency (ConcurrencyConfig): Concurrency config used to configure the sqlalchemy engine.
        """
        super().__init__(name=name, batch_data_dict=batch_data_dict)
        self._name = name
        self._credentials = credentials
        self._connection_string = connection_string
        self._url = url
        self._create_temp_table = create_temp_table
        # Precedence for building the engine: explicit engine > credentials >
        # connection_string > url; anything else is a configuration error.
        if engine is not None:
            if credentials is not None:
                logger.warning(
                    "Both credentials and engine were provided during initialization of SqlAlchemyExecutionEngine. "
                    "Ignoring credentials."
                )
            self.engine = engine
        else:
            concurrency = (
                concurrency if concurrency is not None else ConcurrencyConfig()
            )
            concurrency.add_sqlalchemy_create_engine_parameters(kwargs)
            if credentials is not None:
                self.engine = self._build_engine(credentials=credentials, **kwargs)
            elif connection_string is not None:
                self.engine = sa.create_engine(connection_string, **kwargs)
            elif url is not None:
                self.drivername = urlparse(url).scheme
                self.engine = sa.create_engine(url, **kwargs)
            else:
                raise InvalidConfigError(
                    "Credentials or an engine are required for a SqlAlchemyExecutionEngine."
                )
        # Get the dialect **for purposes of identifying types**
        if self.engine.dialect.name.lower() in [
            "postgresql",
            "mysql",
            "sqlite",
            "oracle",
            "mssql",
        ]:
            # These are the officially included and supported dialects by sqlalchemy
            self.dialect_module = import_library_module(
                module_name="sqlalchemy.dialects." + self.engine.dialect.name
            )
        elif self.engine.dialect.name.lower() == "snowflake":
            self.dialect_module = import_library_module(
                module_name="snowflake.sqlalchemy.snowdialect"
            )
        elif self.engine.dialect.name.lower() == "redshift":
            self.dialect_module = import_library_module(
                module_name="sqlalchemy_redshift.dialect"
            )
        elif self.engine.dialect.name.lower() == "bigquery":
            self.dialect_module = import_library_module(
                module_name="pybigquery.sqlalchemy_bigquery"
            )
        else:
            self.dialect_module = None
        # <WILL> 20210726 - engine_backup is used by the snowflake connector, which requires connection and engine
        # to be closed and disposed separately. Currently self.engine can refer to either a Connection or Engine,
        # depending on the backend. This will need to be cleaned up in an upcoming refactor, so that Engine and
        # Connection can be handled separately.
        self._engine_backup = None
        if self.engine and self.engine.dialect.name.lower() in [
            "sqlite",
            "mssql",
            "snowflake",
            "mysql",
        ]:
            self._engine_backup = self.engine
            # sqlite/mssql temp tables only persist within a connection so override the engine
            self.engine = self.engine.connect()
        # Send a connect event to provide dialect type
        if data_context is not None and getattr(
            data_context, "_usage_statistics_handler", None
        ):
            handler = data_context._usage_statistics_handler
            handler.send_usage_message(
                event="execution_engine.sqlalchemy.connect",
                event_payload={
                    "anonymized_name": handler._execution_engine_anonymizer.anonymize(
                        self.name
                    ),
                    "sqlalchemy_dialect": self.engine.name,
                },
                success=True,
            )
        # Gather the call arguments of the present function (and add the "class_name"), filter out the Falsy values,
        # and set the instance "_config" variable equal to the resulting dictionary.
        self._config = {
            "name": name,
            "credentials": credentials,
            "data_context": data_context,
            "engine": engine,
            "connection_string": connection_string,
            "url": url,
            "batch_data_dict": batch_data_dict,
            "module_name": self.__class__.__module__,
            "class_name": self.__class__.__name__,
        }
        self._config.update(kwargs)
        filter_properties_dict(properties=self._config, clean_falsy=True, inplace=True)
    @property
    def credentials(self):
        # Credentials dict captured at construction (None when an engine or
        # connection string/url was supplied instead).
        return self._credentials
    @property
    def connection_string(self):
        # Connection string captured at construction (may be None).
        return self._connection_string
    @property
    def url(self):
        # Database url captured at construction (may be None).
        return self._url
    def _build_engine(self, credentials, **kwargs) -> "sa.engine.Engine":
        """
        Using a set of given credentials, constructs an Execution Engine, connecting to a database using a URL or a
        private key path.

        NOTE(review): mutates the passed-in ``credentials`` dict (pops
        drivername/schema_name/connect_args/private_key_*) -- confirm callers
        do not reuse it afterwards.
        """
        # Update credentials with anything passed during connection time
        drivername = credentials.pop("drivername")
        schema_name = credentials.pop("schema_name", None)
        if schema_name is not None:
            logger.warning(
                "schema_name specified creating a URL with schema is not supported. Set a default "
                "schema on the user connecting to your database."
            )
        create_engine_kwargs = kwargs
        connect_args = credentials.pop("connect_args", None)
        if connect_args:
            create_engine_kwargs["connect_args"] = connect_args
        # Key-pair authentication (e.g. Snowflake) takes a different URL/kwargs path.
        if "private_key_path" in credentials:
            options, create_engine_kwargs = self._get_sqlalchemy_key_pair_auth_url(
                drivername, credentials
            )
        else:
            options = sa.engine.url.URL(drivername, **credentials)
        self.drivername = drivername
        engine = sa.create_engine(options, **create_engine_kwargs)
        return engine
    def _get_sqlalchemy_key_pair_auth_url(
        self, drivername: str, credentials: dict
    ) -> Tuple["sa.engine.url.URL", Dict]:
        """
        Utilizing a private key path and a passphrase in a given credentials dictionary, attempts to encode the provided
        values into a private key. If passphrase is incorrect, this will fail and an exception is raised.
        Args:
            drivername(str) - The name of the driver class
            credentials(dict) - A dictionary of database credentials used to access the database
        Returns:
            a tuple consisting of a url with the serialized key-pair authentication, and a dictionary of engine kwargs.
        """
        from cryptography.hazmat.backends import default_backend
        from cryptography.hazmat.primitives import serialization
        private_key_path = credentials.pop("private_key_path")
        private_key_passphrase = credentials.pop("private_key_passphrase")
        # Load and decrypt the PEM key; a wrong passphrase surfaces as a
        # ValueError containing "incorrect password".
        with Path(private_key_path).expanduser().resolve().open(mode="rb") as key:
            try:
                p_key = serialization.load_pem_private_key(
                    key.read(),
                    password=private_key_passphrase.encode()
                    if private_key_passphrase
                    else None,
                    backend=default_backend(),
                )
            except ValueError as e:
                if "incorrect password" in str(e).lower():
                    raise DatasourceKeyPairAuthBadPassphraseError(
                        datasource_name="SqlAlchemyDatasource",
                        message="Decryption of key failed, was the passphrase incorrect?",
                    ) from e
                else:
                    raise e
        # Re-serialize as unencrypted DER (PKCS8) for the driver's connect_args.
        pkb = p_key.private_bytes(
            encoding=serialization.Encoding.DER,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=serialization.NoEncryption(),
        )
        credentials_driver_name = credentials.pop("drivername", None)
        create_engine_kwargs = {"connect_args": {"private_key": pkb}}
        return (
            sa.engine.url.URL(drivername or credentials_driver_name, **credentials),
            create_engine_kwargs,
        )
def get_domain_records(
self,
domain_kwargs: Dict,
) -> Selectable:
"""
Uses the given domain kwargs (which include row_condition, condition_parser, and ignore_row_if directives) to
obtain and/or query a batch. Returns in the format of an SqlAlchemy table/column(s) object.
Args:
domain_kwargs (dict) - A dictionary consisting of the domain kwargs specifying which data to obtain
Returns:
An SqlAlchemy table/column(s) (the selectable object for obtaining data on which to compute)
"""
batch_id = domain_kwargs.get("batch_id")
if batch_id is None:
# We allow no batch id specified if there is only one batch
if self.active_batch_data:
data_object = self.active_batch_data
else:
raise GreatExpectationsError(
"No batch is specified, but could not identify a loaded batch."
)
else:
if batch_id in self.loaded_batch_data_dict:
data_object = self.loaded_batch_data_dict[batch_id]
else:
raise GreatExpectationsError(
f"Unable to find batch with batch_id {batch_id}"
)
if "table" in domain_kwargs and domain_kwargs["table"] is not None:
# TODO: Add logic to handle record_set_name once implemented
# (i.e. multiple record sets (tables) in one batch
if domain_kwargs["table"] != data_object.selectable.name:
selectable = sa.Table(
domain_kwargs["table"],
sa.MetaData(),
schema_name=data_object._schema_name,
)
else:
selectable = data_object.selectable
elif "query" in domain_kwargs:
raise ValueError(
"query is not currently supported by SqlAlchemyExecutionEngine"
)
else:
selectable = data_object.selectable
# Filtering by row condition.
if (
"row_condition" in domain_kwargs
and domain_kwargs["row_condition"] is not None
):
condition_parser = domain_kwargs["condition_parser"]
if condition_parser == "great_expectations__experimental__":
parsed_condition = parse_condition_to_sqlalchemy(
domain_kwargs["row_condition"]
)
selectable = sa.select(
"*", from_obj=selectable, whereclause=parsed_condition
)
else:
raise GreatExpectationsError(
"SqlAlchemyExecutionEngine only supports the great_expectations condition_parser."
)
if "column" in domain_kwargs:
return selectable
if (
"column_A" in domain_kwargs
and "column_B" in domain_kwargs
and "ignore_row_if" in domain_kwargs
):
if self.active_batch_data.use_quoted_name:
# Checking if case-sensitive and using appropriate name
# noinspection PyPep8Naming
column_A_name = quoted_name(domain_kwargs["column_A"], quote=True)
# noinspection PyPep8Naming
column_B_name = quoted_name(domain_kwargs["column_B"], quote=True)
else:
# noinspection PyPep8Naming
column_A_name = domain_kwargs["column_A"]
# noinspection PyPep8Naming
column_B_name = domain_kwargs["column_B"]
ignore_row_if = domain_kwargs["ignore_row_if"]
if ignore_row_if == "both_values_are_missing":
selectable = (
sa.select([sa.text("*")])
.select_from(selectable)
.where(
sa.not_(
sa.and_(
sa.column(column_A_name) == None,
sa.column(column_B_name) == None,
)
)
)
)
elif ignore_row_if == "either_value_is_missing":
selectable = (
sa.select([sa.text("*")])
.select_from(selectable)
.where(
sa.not_(
sa.or_(
sa.column(column_A_name) == None,
sa.column(column_B_name) == None,
)
)
)
)
else:
if ignore_row_if not in ["neither", "never"]:
raise ValueError(
f'Unrecognized value of ignore_row_if ("{ignore_row_if}").'
)
if ignore_row_if == "never":
warnings.warn(
f"""The correct "no-action" value of the "ignore_row_if" directive for the column pair case is \
"neither" (the use of "{ignore_row_if}" will be deprecated). Please update code accordingly.
""",
DeprecationWarning,
)
return selectable
if "column_list" in domain_kwargs and "ignore_row_if" in domain_kwargs:
if self.active_batch_data.use_quoted_name:
# Checking if case-sensitive and using appropriate name
column_list = [
quoted_name(domain_kwargs[column_name], quote=True)
for column_name in domain_kwargs["column_list"]
]
else:
column_list = domain_kwargs["column_list"]
ignore_row_if = domain_kwargs["ignore_row_if"]
if ignore_row_if == "all_values_are_missing":
selectable = (
sa.select([sa.text("*")])
.select_from(selectable)
.where(
sa.not_(
sa.and_(
*(
sa.column(column_name) == None
for column_name in column_list
)
)
)
)
)
elif ignore_row_if == "any_value_is_missing":
selectable = (
sa.select([sa.text("*")])
.select_from(selectable)
.where(
sa.not_(
sa.or_(
*(
sa.column(column_name) == None
for column_name in column_list
)
)
)
)
)
else:
if ignore_row_if != "never":
raise ValueError(
f'Unrecognized value of ignore_row_if ("{ignore_row_if}").'
)
return selectable
return selectable
    def get_compute_domain(
        self,
        domain_kwargs: Dict,
        domain_type: Union[str, MetricDomainTypes],
        accessor_keys: Optional[Iterable[str]] = None,
    ) -> Tuple[Selectable, dict, dict]:
        """Uses a given batch dictionary and domain kwargs to obtain a SqlAlchemy column object.
        Args:
            domain_kwargs (dict) - A dictionary consisting of the domain kwargs specifying which data to obtain
            domain_type (str or MetricDomainTypes) - an Enum value indicating which metric domain the user would
            like to be using, or a corresponding string value representing it. String types include "identity",
            "column", "column_pair", "table" and "other". Enum types include capitalized versions of these from the
            class MetricDomainTypes.
            accessor_keys (str iterable) - keys that are part of the compute domain but should be ignored when
            describing the domain and simply transferred with their associated values into accessor_domain_kwargs.
        Returns:
            A tuple: (selectable for the domain's records, compute_domain_kwargs, accessor_domain_kwargs)
        """
        selectable = self.get_domain_records(
            domain_kwargs=domain_kwargs,
        )
        # Extracting value from enum if it is given for future computation
        domain_type = MetricDomainTypes(domain_type)
        # Warning user if accessor keys are in any domain that is not of type table, will be ignored
        if (
            domain_type != MetricDomainTypes.TABLE
            and accessor_keys is not None
            and len(list(accessor_keys)) > 0
        ):
            logger.warning(
                'Accessor keys ignored since Metric Domain Type is not "table"'
            )
        # Domain-identifying keys stay in compute_domain_kwargs; keys that only
        # locate values within the domain are moved to accessor_domain_kwargs.
        compute_domain_kwargs = copy.deepcopy(domain_kwargs)
        accessor_domain_kwargs = {}
        if domain_type == MetricDomainTypes.TABLE:
            if accessor_keys is not None and len(list(accessor_keys)) > 0:
                for key in accessor_keys:
                    accessor_domain_kwargs[key] = compute_domain_kwargs.pop(key)
            if len(domain_kwargs.keys()) > 0:
                # Warn user if kwarg not "normal".
                unexpected_keys: set = set(compute_domain_kwargs.keys()).difference(
                    {
                        "batch_id",
                        "table",
                        "row_condition",
                        "condition_parser",
                    }
                )
                if len(unexpected_keys) > 0:
                    unexpected_keys_str: str = ", ".join(
                        map(lambda element: f'"{element}"', unexpected_keys)
                    )
                    logger.warning(
                        f'Unexpected key(s) {unexpected_keys_str} found in domain_kwargs for domain type "{domain_type.value}".'
                    )
            return selectable, compute_domain_kwargs, accessor_domain_kwargs
        elif domain_type == MetricDomainTypes.COLUMN:
            if "column" not in compute_domain_kwargs:
                raise GreatExpectationsError(
                    "Column not provided in compute_domain_kwargs"
                )
            # Checking if case-sensitive and using appropriate name
            if self.active_batch_data.use_quoted_name:
                accessor_domain_kwargs["column"] = quoted_name(
                    compute_domain_kwargs.pop("column"), quote=True
                )
            else:
                accessor_domain_kwargs["column"] = compute_domain_kwargs.pop("column")
            return selectable, compute_domain_kwargs, accessor_domain_kwargs
        elif domain_type == MetricDomainTypes.COLUMN_PAIR:
            if not (
                "column_A" in compute_domain_kwargs
                and "column_B" in compute_domain_kwargs
            ):
                raise GreatExpectationsError(
                    "column_A or column_B not found within compute_domain_kwargs"
                )
            # Checking if case-sensitive and using appropriate name
            if self.active_batch_data.use_quoted_name:
                accessor_domain_kwargs["column_A"] = quoted_name(
                    compute_domain_kwargs.pop("column_A"), quote=True
                )
                accessor_domain_kwargs["column_B"] = quoted_name(
                    compute_domain_kwargs.pop("column_B"), quote=True
                )
            else:
                accessor_domain_kwargs["column_A"] = compute_domain_kwargs.pop(
                    "column_A"
                )
                accessor_domain_kwargs["column_B"] = compute_domain_kwargs.pop(
                    "column_B"
                )
            return selectable, compute_domain_kwargs, accessor_domain_kwargs
        elif domain_type == MetricDomainTypes.MULTICOLUMN:
            if "column_list" not in domain_kwargs:
                raise GreatExpectationsError(
                    "column_list not found within domain_kwargs"
                )
            column_list = compute_domain_kwargs.pop("column_list")
            if len(column_list) < 2:
                raise GreatExpectationsError(
                    "column_list must contain at least 2 columns"
                )
            # Checking if case-sensitive and using appropriate name
            if self.active_batch_data.use_quoted_name:
                accessor_domain_kwargs["column_list"] = [
                    quoted_name(column_name, quote=True) for column_name in column_list
                ]
            else:
                accessor_domain_kwargs["column_list"] = column_list
            return selectable, compute_domain_kwargs, accessor_domain_kwargs
        # Letting selectable fall through
        return selectable, compute_domain_kwargs, accessor_domain_kwargs
    def resolve_metric_bundle(
        self,
        metric_fn_bundle: Iterable[Tuple[MetricConfiguration, Any, dict, dict]],
    ) -> dict:
        """For every metric in a set of Metrics to resolve, obtains necessary metric keyword arguments and builds
        bundles of the metrics into one large query dictionary so that they are all executed simultaneously. Will fail
        if bundling the metrics together is not possible.
        Args:
            metric_fn_bundle (Iterable[Tuple[MetricConfiguration, Callable, dict]): \
                A Dictionary containing a MetricProvider's MetricConfiguration (its unique identifier), its metric provider function
                (the function that actually executes the metric), and the arguments to pass to the metric provider function.
                A dictionary of metrics defined in the registry and corresponding arguments
        Returns:
            A dictionary of metric names and their corresponding now-queried values.
        """
        resolved_metrics = {}
        # We need a different query for each domain (where clause).
        # NOTE(review): despite the Dict[Tuple, dict] annotation, keys are the
        # string ids returned by IDDict.to_id().
        queries: Dict[Tuple, dict] = {}
        # NOTE(review): 5-element unpacking here vs the 4-tuple in the type
        # hint above -- confirm the actual bundle shape against callers.
        for (
            metric_to_resolve,
            engine_fn,
            compute_domain_kwargs,
            accessor_domain_kwargs,
            metric_provider_kwargs,
        ) in metric_fn_bundle:
            if not isinstance(compute_domain_kwargs, IDDict):
                compute_domain_kwargs = IDDict(compute_domain_kwargs)
            domain_id = compute_domain_kwargs.to_id()
            if domain_id not in queries:
                queries[domain_id] = {
                    "select": [],
                    "ids": [],
                    "domain_kwargs": compute_domain_kwargs,
                }
            # Label each selected expression with its metric name so result
            # columns line up with the bundled metric ids.
            queries[domain_id]["select"].append(
                engine_fn.label(metric_to_resolve.metric_name)
            )
            queries[domain_id]["ids"].append(metric_to_resolve.id)
        for query in queries.values():
            domain_kwargs = query["domain_kwargs"]
            selectable = self.get_domain_records(
                domain_kwargs=domain_kwargs,
            )
            assert len(query["select"]) == len(query["ids"])
            try:
                # One SELECT per domain computes all of its bundled metrics.
                res = self.engine.execute(
                    sa.select(query["select"]).select_from(selectable)
                ).fetchall()
                logger.debug(
                    f"SqlAlchemyExecutionEngine computed {len(res[0])} metrics on domain_id {IDDict(domain_kwargs).to_id()}"
                )
            except OperationalError as oe:
                exception_message: str = "An SQL execution Exception occurred. "
                exception_traceback: str = traceback.format_exc()
                exception_message += f'{type(oe).__name__}: "{str(oe)}". Traceback: "{exception_traceback}".'
                logger.error(exception_message)
                raise ExecutionEngineError(message=exception_message)
            assert (
                len(res) == 1
            ), "all bundle-computed metrics must be single-value statistics"
            assert len(query["ids"]) == len(
                res[0]
            ), "unexpected number of metrics returned"
            for idx, id in enumerate(query["ids"]):
                resolved_metrics[id] = convert_to_json_serializable(res[0][idx])
        return resolved_metrics
    def close(self):
        """
        Note: Will 20210729
        This is a helper function that will close and dispose Sqlalchemy objects that are used to connect to a database.
        Databases like Snowflake require the connection and engine to be instantiated and closed separately, and not
        doing so has caused problems with hanging connections.
        Currently the ExecutionEngine does not support handling connections and engine separately, and will actually
        override the engine with a connection in some cases, obfuscating what object is used to actually used by the
        ExecutionEngine to connect to the external database. This will be handled in an upcoming refactor, which will
        allow this function to eventually become:
        self.connection.close()
        self.engine.dispose()
        More background can be found here: https://github.com/great-expectations/great_expectations/pull/3104/
        """
        # _engine_backup is set in __init__ for dialects where self.engine was
        # replaced by a Connection; close the connection, dispose the engine.
        if self._engine_backup:
            self.engine.close()
            self._engine_backup.dispose()
        else:
            self.engine.dispose()
### Splitter methods for partitioning tables ###
def _split_on_whole_table(self, table_name: str, batch_identifiers: dict):
"""'Split' by returning the whole table"""
# return sa.column(column_name) == batch_identifiers[column_name]
return 1 == 1
def _split_on_column_value(
self, table_name: str, column_name: str, batch_identifiers: dict
):
"""Split using the values in the named column"""
return sa.column(column_name) == batch_identifiers[column_name]
def _split_on_converted_datetime(
self,
table_name: str,
column_name: str,
batch_identifiers: dict,
date_format_string: str = "%Y-%m-%d",
):
"""Convert the values in the named column to the given date_format, and split on that"""
return (
sa.func.strftime(
date_format_string,
sa.column(column_name),
)
== batch_identifiers[column_name]
)
def _split_on_divided_integer(
self, table_name: str, column_name: str, divisor: int, batch_identifiers: dict
):
"""Divide the values in the named column by `divisor`, and split on that"""
return (
sa.cast(sa.column(column_name) / divisor, sa.Integer)
== batch_identifiers[column_name]
)
    def _split_on_mod_integer(
        self, table_name: str, column_name: str, mod: int, batch_identifiers: dict
    ):
        """Take the values in the named column modulo `mod`, and split on that"""
        return sa.column(column_name) % mod == batch_identifiers[column_name]
    def _split_on_multi_column_values(
        self, table_name: str, column_names: List[str], batch_identifiers: dict
    ):
        """Split on the joint values in the named columns.

        Builds an AND of ``column == value`` clauses, one per entry of
        ``batch_identifiers``.
        """
        # NOTE(review): `column_names` is never referenced -- the clause is
        # built from batch_identifiers alone, so every key of batch_identifiers
        # is treated as a column name.  Confirm whether column_names should be
        # used to restrict or validate the keys.
        return sa.and_(
            *(
                sa.column(column_name) == column_value
                for column_name, column_value in batch_identifiers.items()
            )
        )
    def _split_on_hashed_column(
        self,
        table_name: str,
        column_name: str,
        hash_digits: int,
        batch_identifiers: dict,
    ):
        """Split on the hashed value of the named column.

        Keeps rows where the last ``hash_digits`` hex characters of
        md5(column) equal the batch identifier value for ``column_name``.
        """
        # NOTE(review): right() and md5() are dialect-specific SQL functions
        # (SQLite, for instance, ships neither) -- confirm supported backends.
        return (
            sa.func.right(sa.func.md5(sa.column(column_name)), hash_digits)
            == batch_identifiers[column_name]
        )
### Sampling methods ###
# _sample_using_limit
# _sample_using_random
# _sample_using_mod
# _sample_using_a_list
# _sample_using_md5
    def _sample_using_random(
        self,
        p: float = 0.1,
    ):
        """Take a random sample of rows, retaining proportion p.

        Note: the Random function behaves differently on different dialects of SQL.
        """
        # Each row is kept independently with probability p, so the retained
        # fraction is approximately -- not exactly -- p, and results are not
        # reproducible across runs.
        return sa.func.random() < p
def _sample_using_mod(
self,
column_name,
mod: int,
value: int,
):
"""Take the mod of named column, and only keep rows that match the given value"""
return sa.column(column_name) % mod == value
def _sample_using_a_list(
self,
column_name: str,
value_list: list,
):
"""Match the values in the named column against value_list, and only keep the matches"""
return sa.column(column_name).in_(value_list)
def _sample_using_md5(
self,
column_name: str,
hash_digits: int = 1,
hash_value: str = "f",
):
"""Hash the values in the named column, and split on that"""
return (
sa.func.right(
sa.func.md5(sa.cast(sa.column(column_name), sa.Text)), hash_digits
)
== hash_value
)
    def _build_selectable_from_batch_spec(self, batch_spec) -> Union[Selectable, str]:
        """Build the SELECT for a batch from its splitter/sampling directives.

        Returns a SQLAlchemy Selectable in all cases except Oracle
        limit-sampling, where a raw SQL string (with a ROWNUM predicate
        appended) is returned instead.
        """
        table_name: str = batch_spec["table_name"]
        if "splitter_method" in batch_spec:
            # Resolve the splitter by name on self and build the WHERE clause.
            splitter_fn = getattr(self, batch_spec["splitter_method"])
            split_clause = splitter_fn(
                table_name=table_name,
                batch_identifiers=batch_spec["batch_identifiers"],
                **batch_spec["splitter_kwargs"],
            )
        else:
            # No splitter: a trivially-true clause keeps every row.
            split_clause = True
        if "sampling_method" in batch_spec:
            if batch_spec["sampling_method"] == "_sample_using_limit":
                # SQLalchemy's semantics for LIMIT are different than normal WHERE clauses,
                # so the business logic for building the query needs to be different.
                if self.engine.dialect.name.lower() == "oracle":
                    # limit doesn't compile properly for oracle so we will append rownum to query string later
                    raw_query = (
                        sa.select("*")
                        .select_from(
                            sa.table(
                                table_name, schema=batch_spec.get("schema_name", None)
                            )
                        )
                        .where(split_clause)
                    )
                    # literal_binds inlines parameter values so the string is
                    # executable as-is.
                    query = str(
                        raw_query.compile(
                            self.engine, compile_kwargs={"literal_binds": True}
                        )
                    )
                    query += "\nAND ROWNUM <= %d" % batch_spec["sampling_kwargs"]["n"]
                    return query
                else:
                    return (
                        sa.select("*")
                        .select_from(
                            sa.table(
                                table_name, schema=batch_spec.get("schema_name", None)
                            )
                        )
                        .where(split_clause)
                        .limit(batch_spec["sampling_kwargs"]["n"])
                    )
            else:
                # Any other sampler is resolved by name on self and ANDed with
                # the split clause.
                sampler_fn = getattr(self, batch_spec["sampling_method"])
                return (
                    sa.select("*")
                    .select_from(
                        sa.table(table_name, schema=batch_spec.get("schema_name", None))
                    )
                    .where(
                        sa.and_(
                            split_clause,
                            sampler_fn(**batch_spec["sampling_kwargs"]),
                        )
                    )
                )
        # No sampling: select everything matching the split clause.
        return (
            sa.select("*")
            .select_from(
                sa.table(table_name, schema=batch_spec.get("schema_name", None))
            )
            .where(split_clause)
        )
    def get_batch_data_and_markers(
        self, batch_spec: BatchSpec
    ) -> Tuple[Any, BatchMarkers]:
        """Materialize batch data plus load-time markers for ``batch_spec``.

        Accepts only SqlAlchemyDatasourceBatchSpec or RuntimeQueryBatchSpec;
        any other spec type raises InvalidBatchSpecError.  Returns a
        (SqlAlchemyBatchData, BatchMarkers) pair.
        """
        if not isinstance(
            batch_spec, (SqlAlchemyDatasourceBatchSpec, RuntimeQueryBatchSpec)
        ):
            raise InvalidBatchSpecError(
                f"""SqlAlchemyExecutionEngine accepts batch_spec only of type SqlAlchemyDatasourceBatchSpec or
        RuntimeQueryBatchSpec (illegal type "{str(type(batch_spec))}" was received).
                        """
            )
        batch_data: Optional[SqlAlchemyBatchData] = None
        # Record the UTC load time so batches can be traced back to when they
        # were materialized.
        batch_markers: BatchMarkers = BatchMarkers(
            {
                "ge_load_time": datetime.datetime.now(datetime.timezone.utc).strftime(
                    "%Y%m%dT%H%M%S.%fZ"
                )
            }
        )
        temp_table_name: Optional[str]
        if "bigquery_temp_table" in batch_spec:
            temp_table_name = batch_spec.get("bigquery_temp_table")
        else:
            temp_table_name = None
        source_table_name = batch_spec.get("table_name", None)
        source_schema_name = batch_spec.get("schema_name", None)
        if isinstance(batch_spec, RuntimeQueryBatchSpec):
            # query != None is already checked when RuntimeQueryBatchSpec is instantiated
            query: str = batch_spec.query
            # NOTE(review): the raw SQL is replaced with a placeholder on the
            # spec -- presumably to avoid persisting/leaking the query text;
            # confirm downstream consumers rely on this.
            batch_spec.query = "SQLQuery"
            batch_data = SqlAlchemyBatchData(
                execution_engine=self,
                query=query,
                temp_table_name=temp_table_name,
                create_temp_table=batch_spec.get(
                    "create_temp_table", self._create_temp_table
                ),
                source_table_name=source_table_name,
                source_schema_name=source_schema_name,
            )
        elif isinstance(batch_spec, SqlAlchemyDatasourceBatchSpec):
            # Oracle limit-sampling yields a raw SQL string rather than a
            # Selectable (see _build_selectable_from_batch_spec).
            if self.engine.dialect.name.lower() == "oracle":
                selectable: str = self._build_selectable_from_batch_spec(
                    batch_spec=batch_spec
                )
            else:
                selectable: Selectable = self._build_selectable_from_batch_spec(
                    batch_spec=batch_spec
                )
            batch_data = SqlAlchemyBatchData(
                execution_engine=self,
                selectable=selectable,
                temp_table_name=temp_table_name,
                create_temp_table=batch_spec.get(
                    "create_temp_table", self._create_temp_table
                ),
                source_table_name=source_table_name,
                source_schema_name=source_schema_name,
            )
        return batch_data, batch_markers
| 40.519849 | 143 | 0.586937 | import copy
import datetime
import logging
import traceback
import warnings
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
from urllib.parse import urlparse
from great_expectations._version import get_versions
__version__ = get_versions()["version"]
del get_versions
from great_expectations.core import IDDict
from great_expectations.core.batch import BatchMarkers, BatchSpec
from great_expectations.core.batch_spec import (
RuntimeQueryBatchSpec,
SqlAlchemyDatasourceBatchSpec,
)
from great_expectations.core.util import convert_to_json_serializable
from great_expectations.data_context.types.base import ConcurrencyConfig
from great_expectations.exceptions import (
DatasourceKeyPairAuthBadPassphraseError,
ExecutionEngineError,
GreatExpectationsError,
InvalidBatchSpecError,
InvalidConfigError,
)
from great_expectations.execution_engine import ExecutionEngine
from great_expectations.execution_engine.execution_engine import MetricDomainTypes
from great_expectations.execution_engine.sqlalchemy_batch_data import (
SqlAlchemyBatchData,
)
from great_expectations.expectations.row_conditions import parse_condition_to_sqlalchemy
from great_expectations.util import filter_properties_dict, import_library_module
from great_expectations.validator.metric_configuration import MetricConfiguration
logger = logging.getLogger(__name__)
try:
import sqlalchemy as sa
except ImportError:
sa = None
try:
from sqlalchemy.engine import reflection
from sqlalchemy.engine.default import DefaultDialect
from sqlalchemy.engine.url import URL
from sqlalchemy.exc import OperationalError
from sqlalchemy.sql import Selectable
from sqlalchemy.sql.elements import TextClause, quoted_name
except ImportError:
reflection = None
DefaultDialect = None
Selectable = None
TextClause = None
quoted_name = None
OperationalError = None
try:
import psycopg2
import sqlalchemy.dialects.postgresql.psycopg2 as sqlalchemy_psycopg2
except (ImportError, KeyError):
sqlalchemy_psycopg2 = None
try:
import sqlalchemy_redshift.dialect
except ImportError:
sqlalchemy_redshift = None
try:
import snowflake.sqlalchemy.snowdialect
if sa:
sa.dialects.registry.register("snowflake", "snowflake.sqlalchemy", "dialect")
except (ImportError, KeyError, AttributeError):
snowflake = None
try:
import pybigquery.sqlalchemy_bigquery
if not hasattr(pybigquery.sqlalchemy_bigquery, "dialect"):
pybigquery.sqlalchemy_bigquery.dialect = (
pybigquery.sqlalchemy_bigquery.BigQueryDialect
)
# Sometimes "pybigquery.sqlalchemy_bigquery" fails to self-register in Azure (our CI/CD pipeline) in certain cases, so we do it explicitly.
# (see https://stackoverflow.com/questions/53284762/nosuchmoduleerror-cant-load-plugin-sqlalchemy-dialectssnowflake)
sa.dialects.registry.register(
"bigquery", "pybigquery.sqlalchemy_bigquery", "dialect"
)
try:
getattr(pybigquery.sqlalchemy_bigquery, "INTEGER")
bigquery_types_tuple = None
except AttributeError:
# In older versions of the pybigquery driver, types were not exported, so we use a hack
logger.warning(
"Old pybigquery driver version detected. Consider upgrading to 0.4.14 or later."
)
from collections import namedtuple
BigQueryTypes = namedtuple(
"BigQueryTypes", sorted(pybigquery.sqlalchemy_bigquery._type_map)
)
bigquery_types_tuple = BigQueryTypes(**pybigquery.sqlalchemy_bigquery._type_map)
except (ImportError, AttributeError):
bigquery_types_tuple = None
pybigquery = None
def _get_dialect_type_module(dialect):
    """Return the module/object that exposes SQL types for ``dialect``.

    Falls back to top-level ``sa`` when no dialect is given, special-cases
    Redshift and BigQuery, and otherwise returns the dialect itself.
    """
    if dialect is None:
        logger.warning(
            "No sqlalchemy dialect found; relying in top-level sqlalchemy types."
        )
        return sa
    try:
        # Redshift does not (yet) export types to top level; only recognize base SA types
        # If sqlalchemy_redshift failed to import it is None and this raises
        # AttributeError, which is swallowed below.
        if isinstance(dialect, sqlalchemy_redshift.dialect.RedshiftDialect):
            return dialect.sa
    except (TypeError, AttributeError):
        pass
    # Bigquery works with newer versions, but use a patch if we had to define bigquery_types_tuple
    try:
        # Likewise, pybigquery may be None when its import failed.
        if (
            isinstance(
                dialect,
                pybigquery.sqlalchemy_bigquery.BigQueryDialect,
            )
            and bigquery_types_tuple is not None
        ):
            return bigquery_types_tuple
    except (TypeError, AttributeError):
        pass
    return dialect
class SqlAlchemyExecutionEngine(ExecutionEngine):
def __init__(
self,
name=None,
credentials=None,
data_context=None,
engine=None,
connection_string=None,
url=None,
batch_data_dict=None,
create_temp_table=True,
concurrency: Optional[ConcurrencyConfig] = None,
**kwargs, # These will be passed as optional parameters to the SQLAlchemy engine, **not** the ExecutionEngine
):
super().__init__(name=name, batch_data_dict=batch_data_dict)
self._name = name
self._credentials = credentials
self._connection_string = connection_string
self._url = url
self._create_temp_table = create_temp_table
if engine is not None:
if credentials is not None:
logger.warning(
"Both credentials and engine were provided during initialization of SqlAlchemyExecutionEngine. "
"Ignoring credentials."
)
self.engine = engine
else:
concurrency = (
concurrency if concurrency is not None else ConcurrencyConfig()
)
concurrency.add_sqlalchemy_create_engine_parameters(kwargs)
if credentials is not None:
self.engine = self._build_engine(credentials=credentials, **kwargs)
elif connection_string is not None:
self.engine = sa.create_engine(connection_string, **kwargs)
elif url is not None:
self.drivername = urlparse(url).scheme
self.engine = sa.create_engine(url, **kwargs)
else:
raise InvalidConfigError(
"Credentials or an engine are required for a SqlAlchemyExecutionEngine."
)
# Get the dialect **for purposes of identifying types**
if self.engine.dialect.name.lower() in [
"postgresql",
"mysql",
"sqlite",
"oracle",
"mssql",
]:
# These are the officially included and supported dialects by sqlalchemy
self.dialect_module = import_library_module(
module_name="sqlalchemy.dialects." + self.engine.dialect.name
)
elif self.engine.dialect.name.lower() == "snowflake":
self.dialect_module = import_library_module(
module_name="snowflake.sqlalchemy.snowdialect"
)
elif self.engine.dialect.name.lower() == "redshift":
self.dialect_module = import_library_module(
module_name="sqlalchemy_redshift.dialect"
)
elif self.engine.dialect.name.lower() == "bigquery":
self.dialect_module = import_library_module(
module_name="pybigquery.sqlalchemy_bigquery"
)
else:
self.dialect_module = None
# <WILL> 20210726 - engine_backup is used by the snowflake connector, which requires connection and engine
# to be closed and disposed separately. Currently self.engine can refer to either a Connection or Engine,
# depending on the backend. This will need to be cleaned up in an upcoming refactor, so that Engine and
# Connection can be handled separately.
self._engine_backup = None
if self.engine and self.engine.dialect.name.lower() in [
"sqlite",
"mssql",
"snowflake",
"mysql",
]:
self._engine_backup = self.engine
# sqlite/mssql temp tables only persist within a connection so override the engine
self.engine = self.engine.connect()
# Send a connect event to provide dialect type
if data_context is not None and getattr(
data_context, "_usage_statistics_handler", None
):
handler = data_context._usage_statistics_handler
handler.send_usage_message(
event="execution_engine.sqlalchemy.connect",
event_payload={
"anonymized_name": handler._execution_engine_anonymizer.anonymize(
self.name
),
"sqlalchemy_dialect": self.engine.name,
},
success=True,
)
# Gather the call arguments of the present function (and add the "class_name"), filter out the Falsy values,
# and set the instance "_config" variable equal to the resulting dictionary.
self._config = {
"name": name,
"credentials": credentials,
"data_context": data_context,
"engine": engine,
"connection_string": connection_string,
"url": url,
"batch_data_dict": batch_data_dict,
"module_name": self.__class__.__module__,
"class_name": self.__class__.__name__,
}
self._config.update(kwargs)
filter_properties_dict(properties=self._config, clean_falsy=True, inplace=True)
@property
def credentials(self):
return self._credentials
@property
def connection_string(self):
return self._connection_string
@property
def url(self):
return self._url
def _build_engine(self, credentials, **kwargs) -> "sa.engine.Engine":
# Update credentials with anything passed during connection time
drivername = credentials.pop("drivername")
schema_name = credentials.pop("schema_name", None)
if schema_name is not None:
logger.warning(
"schema_name specified creating a URL with schema is not supported. Set a default "
"schema on the user connecting to your database."
)
create_engine_kwargs = kwargs
connect_args = credentials.pop("connect_args", None)
if connect_args:
create_engine_kwargs["connect_args"] = connect_args
if "private_key_path" in credentials:
options, create_engine_kwargs = self._get_sqlalchemy_key_pair_auth_url(
drivername, credentials
)
else:
options = sa.engine.url.URL(drivername, **credentials)
self.drivername = drivername
engine = sa.create_engine(options, **create_engine_kwargs)
return engine
def _get_sqlalchemy_key_pair_auth_url(
self, drivername: str, credentials: dict
) -> Tuple["sa.engine.url.URL", Dict]:
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
private_key_path = credentials.pop("private_key_path")
private_key_passphrase = credentials.pop("private_key_passphrase")
with Path(private_key_path).expanduser().resolve().open(mode="rb") as key:
try:
p_key = serialization.load_pem_private_key(
key.read(),
password=private_key_passphrase.encode()
if private_key_passphrase
else None,
backend=default_backend(),
)
except ValueError as e:
if "incorrect password" in str(e).lower():
raise DatasourceKeyPairAuthBadPassphraseError(
datasource_name="SqlAlchemyDatasource",
message="Decryption of key failed, was the passphrase incorrect?",
) from e
else:
raise e
pkb = p_key.private_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
)
credentials_driver_name = credentials.pop("drivername", None)
create_engine_kwargs = {"connect_args": {"private_key": pkb}}
return (
sa.engine.url.URL(drivername or credentials_driver_name, **credentials),
create_engine_kwargs,
)
def get_domain_records(
self,
domain_kwargs: Dict,
) -> Selectable:
batch_id = domain_kwargs.get("batch_id")
if batch_id is None:
# We allow no batch id specified if there is only one batch
if self.active_batch_data:
data_object = self.active_batch_data
else:
raise GreatExpectationsError(
"No batch is specified, but could not identify a loaded batch."
)
else:
if batch_id in self.loaded_batch_data_dict:
data_object = self.loaded_batch_data_dict[batch_id]
else:
raise GreatExpectationsError(
f"Unable to find batch with batch_id {batch_id}"
)
if "table" in domain_kwargs and domain_kwargs["table"] is not None:
# TODO: Add logic to handle record_set_name once implemented
# (i.e. multiple record sets (tables) in one batch
if domain_kwargs["table"] != data_object.selectable.name:
selectable = sa.Table(
domain_kwargs["table"],
sa.MetaData(),
schema_name=data_object._schema_name,
)
else:
selectable = data_object.selectable
elif "query" in domain_kwargs:
raise ValueError(
"query is not currently supported by SqlAlchemyExecutionEngine"
)
else:
selectable = data_object.selectable
# Filtering by row condition.
if (
"row_condition" in domain_kwargs
and domain_kwargs["row_condition"] is not None
):
condition_parser = domain_kwargs["condition_parser"]
if condition_parser == "great_expectations__experimental__":
parsed_condition = parse_condition_to_sqlalchemy(
domain_kwargs["row_condition"]
)
selectable = sa.select(
"*", from_obj=selectable, whereclause=parsed_condition
)
else:
raise GreatExpectationsError(
"SqlAlchemyExecutionEngine only supports the great_expectations condition_parser."
)
if "column" in domain_kwargs:
return selectable
if (
"column_A" in domain_kwargs
and "column_B" in domain_kwargs
and "ignore_row_if" in domain_kwargs
):
if self.active_batch_data.use_quoted_name:
# Checking if case-sensitive and using appropriate name
# noinspection PyPep8Naming
column_A_name = quoted_name(domain_kwargs["column_A"], quote=True)
# noinspection PyPep8Naming
column_B_name = quoted_name(domain_kwargs["column_B"], quote=True)
else:
# noinspection PyPep8Naming
column_A_name = domain_kwargs["column_A"]
# noinspection PyPep8Naming
column_B_name = domain_kwargs["column_B"]
ignore_row_if = domain_kwargs["ignore_row_if"]
if ignore_row_if == "both_values_are_missing":
selectable = (
sa.select([sa.text("*")])
.select_from(selectable)
.where(
sa.not_(
sa.and_(
sa.column(column_A_name) == None,
sa.column(column_B_name) == None,
)
)
)
)
elif ignore_row_if == "either_value_is_missing":
selectable = (
sa.select([sa.text("*")])
.select_from(selectable)
.where(
sa.not_(
sa.or_(
sa.column(column_A_name) == None,
sa.column(column_B_name) == None,
)
)
)
)
else:
if ignore_row_if not in ["neither", "never"]:
raise ValueError(
f'Unrecognized value of ignore_row_if ("{ignore_row_if}").'
)
if ignore_row_if == "never":
warnings.warn(
f"""The correct "no-action" value of the "ignore_row_if" directive for the column pair case is \
"neither" (the use of "{ignore_row_if}" will be deprecated). Please update code accordingly.
""",
DeprecationWarning,
)
return selectable
if "column_list" in domain_kwargs and "ignore_row_if" in domain_kwargs:
if self.active_batch_data.use_quoted_name:
# Checking if case-sensitive and using appropriate name
column_list = [
quoted_name(domain_kwargs[column_name], quote=True)
for column_name in domain_kwargs["column_list"]
]
else:
column_list = domain_kwargs["column_list"]
ignore_row_if = domain_kwargs["ignore_row_if"]
if ignore_row_if == "all_values_are_missing":
selectable = (
sa.select([sa.text("*")])
.select_from(selectable)
.where(
sa.not_(
sa.and_(
*(
sa.column(column_name) == None
for column_name in column_list
)
)
)
)
)
elif ignore_row_if == "any_value_is_missing":
selectable = (
sa.select([sa.text("*")])
.select_from(selectable)
.where(
sa.not_(
sa.or_(
*(
sa.column(column_name) == None
for column_name in column_list
)
)
)
)
)
else:
if ignore_row_if != "never":
raise ValueError(
f'Unrecognized value of ignore_row_if ("{ignore_row_if}").'
)
return selectable
return selectable
def get_compute_domain(
self,
domain_kwargs: Dict,
domain_type: Union[str, MetricDomainTypes],
accessor_keys: Optional[Iterable[str]] = None,
) -> Tuple[Selectable, dict, dict]:
selectable = self.get_domain_records(
domain_kwargs=domain_kwargs,
)
# Extracting value from enum if it is given for future computation
domain_type = MetricDomainTypes(domain_type)
# Warning user if accessor keys are in any domain that is not of type table, will be ignored
if (
domain_type != MetricDomainTypes.TABLE
and accessor_keys is not None
and len(list(accessor_keys)) > 0
):
logger.warning(
'Accessor keys ignored since Metric Domain Type is not "table"'
)
compute_domain_kwargs = copy.deepcopy(domain_kwargs)
accessor_domain_kwargs = {}
if domain_type == MetricDomainTypes.TABLE:
if accessor_keys is not None and len(list(accessor_keys)) > 0:
for key in accessor_keys:
accessor_domain_kwargs[key] = compute_domain_kwargs.pop(key)
if len(domain_kwargs.keys()) > 0:
# Warn user if kwarg not "normal".
unexpected_keys: set = set(compute_domain_kwargs.keys()).difference(
{
"batch_id",
"table",
"row_condition",
"condition_parser",
}
)
if len(unexpected_keys) > 0:
unexpected_keys_str: str = ", ".join(
map(lambda element: f'"{element}"', unexpected_keys)
)
logger.warning(
f'Unexpected key(s) {unexpected_keys_str} found in domain_kwargs for domain type "{domain_type.value}".'
)
return selectable, compute_domain_kwargs, accessor_domain_kwargs
elif domain_type == MetricDomainTypes.COLUMN:
if "column" not in compute_domain_kwargs:
raise GreatExpectationsError(
"Column not provided in compute_domain_kwargs"
)
# Checking if case-sensitive and using appropriate name
if self.active_batch_data.use_quoted_name:
accessor_domain_kwargs["column"] = quoted_name(
compute_domain_kwargs.pop("column"), quote=True
)
else:
accessor_domain_kwargs["column"] = compute_domain_kwargs.pop("column")
return selectable, compute_domain_kwargs, accessor_domain_kwargs
elif domain_type == MetricDomainTypes.COLUMN_PAIR:
if not (
"column_A" in compute_domain_kwargs
and "column_B" in compute_domain_kwargs
):
raise GreatExpectationsError(
"column_A or column_B not found within compute_domain_kwargs"
)
# Checking if case-sensitive and using appropriate name
if self.active_batch_data.use_quoted_name:
accessor_domain_kwargs["column_A"] = quoted_name(
compute_domain_kwargs.pop("column_A"), quote=True
)
accessor_domain_kwargs["column_B"] = quoted_name(
compute_domain_kwargs.pop("column_B"), quote=True
)
else:
accessor_domain_kwargs["column_A"] = compute_domain_kwargs.pop(
"column_A"
)
accessor_domain_kwargs["column_B"] = compute_domain_kwargs.pop(
"column_B"
)
return selectable, compute_domain_kwargs, accessor_domain_kwargs
elif domain_type == MetricDomainTypes.MULTICOLUMN:
if "column_list" not in domain_kwargs:
raise GreatExpectationsError(
"column_list not found within domain_kwargs"
)
column_list = compute_domain_kwargs.pop("column_list")
if len(column_list) < 2:
raise GreatExpectationsError(
"column_list must contain at least 2 columns"
)
# Checking if case-sensitive and using appropriate name
if self.active_batch_data.use_quoted_name:
accessor_domain_kwargs["column_list"] = [
quoted_name(column_name, quote=True) for column_name in column_list
]
else:
accessor_domain_kwargs["column_list"] = column_list
return selectable, compute_domain_kwargs, accessor_domain_kwargs
# Letting selectable fall through
return selectable, compute_domain_kwargs, accessor_domain_kwargs
def resolve_metric_bundle(
self,
metric_fn_bundle: Iterable[Tuple[MetricConfiguration, Any, dict, dict]],
) -> dict:
resolved_metrics = {}
# We need a different query for each domain (where clause).
queries: Dict[Tuple, dict] = {}
for (
metric_to_resolve,
engine_fn,
compute_domain_kwargs,
accessor_domain_kwargs,
metric_provider_kwargs,
) in metric_fn_bundle:
if not isinstance(compute_domain_kwargs, IDDict):
compute_domain_kwargs = IDDict(compute_domain_kwargs)
domain_id = compute_domain_kwargs.to_id()
if domain_id not in queries:
queries[domain_id] = {
"select": [],
"ids": [],
"domain_kwargs": compute_domain_kwargs,
}
queries[domain_id]["select"].append(
engine_fn.label(metric_to_resolve.metric_name)
)
queries[domain_id]["ids"].append(metric_to_resolve.id)
for query in queries.values():
domain_kwargs = query["domain_kwargs"]
selectable = self.get_domain_records(
domain_kwargs=domain_kwargs,
)
assert len(query["select"]) == len(query["ids"])
try:
res = self.engine.execute(
sa.select(query["select"]).select_from(selectable)
).fetchall()
logger.debug(
f"SqlAlchemyExecutionEngine computed {len(res[0])} metrics on domain_id {IDDict(domain_kwargs).to_id()}"
)
except OperationalError as oe:
exception_message: str = "An SQL execution Exception occurred. "
exception_traceback: str = traceback.format_exc()
exception_message += f'{type(oe).__name__}: "{str(oe)}". Traceback: "{exception_traceback}".'
logger.error(exception_message)
raise ExecutionEngineError(message=exception_message)
assert (
len(res) == 1
), "all bundle-computed metrics must be single-value statistics"
assert len(query["ids"]) == len(
res[0]
), "unexpected number of metrics returned"
for idx, id in enumerate(query["ids"]):
resolved_metrics[id] = convert_to_json_serializable(res[0][idx])
return resolved_metrics
    def close(self):
        """Release database resources.

        When self.engine was overridden with a Connection (dialects listed in
        __init__ that need connection-scoped temp tables), close the
        Connection first, then dispose the backed-up Engine; otherwise just
        dispose the Engine.
        """
        if self._engine_backup:
            self.engine.close()
            self._engine_backup.dispose()
        else:
            self.engine.dispose()
### Splitter methods for partitioning tables ###
def _split_on_whole_table(self, table_name: str, batch_identifiers: dict):
# return sa.column(column_name) == batch_identifiers[column_name]
return 1 == 1
def _split_on_column_value(
self, table_name: str, column_name: str, batch_identifiers: dict
):
return sa.column(column_name) == batch_identifiers[column_name]
def _split_on_converted_datetime(
self,
table_name: str,
column_name: str,
batch_identifiers: dict,
date_format_string: str = "%Y-%m-%d",
):
return (
sa.func.strftime(
date_format_string,
sa.column(column_name),
)
== batch_identifiers[column_name]
)
def _split_on_divided_integer(
self, table_name: str, column_name: str, divisor: int, batch_identifiers: dict
):
return (
sa.cast(sa.column(column_name) / divisor, sa.Integer)
== batch_identifiers[column_name]
)
def _split_on_mod_integer(
self, table_name: str, column_name: str, mod: int, batch_identifiers: dict
):
return sa.column(column_name) % mod == batch_identifiers[column_name]
def _split_on_multi_column_values(
self, table_name: str, column_names: List[str], batch_identifiers: dict
):
return sa.and_(
*(
sa.column(column_name) == column_value
for column_name, column_value in batch_identifiers.items()
)
)
def _split_on_hashed_column(
self,
table_name: str,
column_name: str,
hash_digits: int,
batch_identifiers: dict,
):
return (
sa.func.right(sa.func.md5(sa.column(column_name)), hash_digits)
== batch_identifiers[column_name]
)
### Sampling methods ###
# _sample_using_limit
# _sample_using_random
# _sample_using_mod
# _sample_using_a_list
# _sample_using_md5
def _sample_using_random(
self,
p: float = 0.1,
):
return sa.func.random() < p
def _sample_using_mod(
self,
column_name,
mod: int,
value: int,
):
return sa.column(column_name) % mod == value
def _sample_using_a_list(
self,
column_name: str,
value_list: list,
):
return sa.column(column_name).in_(value_list)
def _sample_using_md5(
self,
column_name: str,
hash_digits: int = 1,
hash_value: str = "f",
):
return (
sa.func.right(
sa.func.md5(sa.cast(sa.column(column_name), sa.Text)), hash_digits
)
== hash_value
)
def _build_selectable_from_batch_spec(self, batch_spec) -> Union[Selectable, str]:
table_name: str = batch_spec["table_name"]
if "splitter_method" in batch_spec:
splitter_fn = getattr(self, batch_spec["splitter_method"])
split_clause = splitter_fn(
table_name=table_name,
batch_identifiers=batch_spec["batch_identifiers"],
**batch_spec["splitter_kwargs"],
)
else:
split_clause = True
if "sampling_method" in batch_spec:
if batch_spec["sampling_method"] == "_sample_using_limit":
# SQLalchemy's semantics for LIMIT are different than normal WHERE clauses,
if self.engine.dialect.name.lower() == "oracle":
raw_query = (
sa.select("*")
.select_from(
sa.table(
table_name, schema=batch_spec.get("schema_name", None)
)
)
.where(split_clause)
)
query = str(
raw_query.compile(
self.engine, compile_kwargs={"literal_binds": True}
)
)
query += "\nAND ROWNUM <= %d" % batch_spec["sampling_kwargs"]["n"]
return query
else:
return (
sa.select("*")
.select_from(
sa.table(
table_name, schema=batch_spec.get("schema_name", None)
)
)
.where(split_clause)
.limit(batch_spec["sampling_kwargs"]["n"])
)
else:
sampler_fn = getattr(self, batch_spec["sampling_method"])
return (
sa.select("*")
.select_from(
sa.table(table_name, schema=batch_spec.get("schema_name", None))
)
.where(
sa.and_(
split_clause,
sampler_fn(**batch_spec["sampling_kwargs"]),
)
)
)
return (
sa.select("*")
.select_from(
sa.table(table_name, schema=batch_spec.get("schema_name", None))
)
.where(split_clause)
)
def get_batch_data_and_markers(
self, batch_spec: BatchSpec
) -> Tuple[Any, BatchMarkers]:
if not isinstance(
batch_spec, (SqlAlchemyDatasourceBatchSpec, RuntimeQueryBatchSpec)
):
raise InvalidBatchSpecError(
f"""SqlAlchemyExecutionEngine accepts batch_spec only of type SqlAlchemyDatasourceBatchSpec or
RuntimeQueryBatchSpec (illegal type "{str(type(batch_spec))}" was received).
"""
)
batch_data: Optional[SqlAlchemyBatchData] = None
batch_markers: BatchMarkers = BatchMarkers(
{
"ge_load_time": datetime.datetime.now(datetime.timezone.utc).strftime(
"%Y%m%dT%H%M%S.%fZ"
)
}
)
temp_table_name: Optional[str]
if "bigquery_temp_table" in batch_spec:
temp_table_name = batch_spec.get("bigquery_temp_table")
else:
temp_table_name = None
source_table_name = batch_spec.get("table_name", None)
source_schema_name = batch_spec.get("schema_name", None)
if isinstance(batch_spec, RuntimeQueryBatchSpec):
# query != None is already checked when RuntimeQueryBatchSpec is instantiated
query: str = batch_spec.query
batch_spec.query = "SQLQuery"
batch_data = SqlAlchemyBatchData(
execution_engine=self,
query=query,
temp_table_name=temp_table_name,
create_temp_table=batch_spec.get(
"create_temp_table", self._create_temp_table
),
source_table_name=source_table_name,
source_schema_name=source_schema_name,
)
elif isinstance(batch_spec, SqlAlchemyDatasourceBatchSpec):
if self.engine.dialect.name.lower() == "oracle":
selectable: str = self._build_selectable_from_batch_spec(
batch_spec=batch_spec
)
else:
selectable: Selectable = self._build_selectable_from_batch_spec(
batch_spec=batch_spec
)
batch_data = SqlAlchemyBatchData(
execution_engine=self,
selectable=selectable,
temp_table_name=temp_table_name,
create_temp_table=batch_spec.get(
"create_temp_table", self._create_temp_table
),
source_table_name=source_table_name,
source_schema_name=source_schema_name,
)
return batch_data, batch_markers
| true | true |
f71eed9031059f80aef2fada7ae4f0a9856e325f | 1,967 | py | Python | Python/zc16 password.py | KIrayamoto/666 | 6b8f83312ea400fc6399da4d474ce9829059cfbf | [
"MIT"
] | 2 | 2020-10-27T14:31:48.000Z | 2022-03-05T10:27:47.000Z | Python/zc16 password.py | KIrayamoto/Program | b63cf5c614aa02bc59ea7e0c716282465823a1e6 | [
"MIT"
] | 1 | 2018-04-10T15:32:14.000Z | 2018-04-28T10:36:18.000Z | Python/zc16 password.py | KIrayamoto/666 | 6b8f83312ea400fc6399da4d474ce9829059cfbf | [
"MIT"
] | null | null | null | import math
a=str(input('输入一串数字或字母:',))
a=a.replace('a','01')
a=a.replace('b','02')
a=a.replace('c','03')
a=a.replace('d','04')
a=a.replace('e','05')
a=a.replace('f','06')
a=a.replace('g','07')
a=a.replace('h','08')
a=a.replace('i','09')
a=a.replace('j','10')
a=a.replace('k','11')
a=a.replace('l','12')
a=a.replace('m','13')
a=a.replace('n','14')
a=a.replace('o','15')
a=a.replace('p','16')
a=a.replace('q','17')
a=a.replace('r','18')
a=a.replace('s','19')
a=a.replace('t','20')
a=a.replace('u','21')
a=a.replace('v','22')
a=a.replace('w','23')
a=a.replace('x','24')
a=a.replace('y','25')
a=a.replace('z','26')
a=a.replace('A','27')
a=a.replace('B','28')
a=a.replace('C','29')
a=a.replace('D','30')
a=a.replace('E','31')
a=a.replace('F','32')
a=a.replace('G','33')
a=a.replace('H','34')
a=a.replace('I','35')
a=a.replace('J','36')
a=a.replace('K','37')
a=a.replace('L','38')
a=a.replace('M','39')
a=a.replace('N','40')
a=a.replace('O','41')
a=a.replace('P','42')
a=a.replace('Q','43')
a=a.replace('R','44')
a=a.replace('S','45')
a=a.replace('T','46')
a=a.replace('U','47')
a=a.replace('V','48')
a=a.replace('W','49')
a=a.replace('X','50')
a=a.replace('Y','51')
a=a.replace('Z','52')
a=float(a)
b=math.sin(a)*9987784
c=b/76823235675*math.cos(b)
d=c*56434344567*math.tan(c)
e=d/54854545464*math.sin(d)
f=e*66646464321/math.sin(e/1000000)
h=f*14457231234/math.cos(f/3333333)
i=h*15978712213/math.tan(h/1000414)
j=i*18964532189*math.sin(i)
k=j*51856453153*math.cos(j)
l=k*78978789898*math.tan(k)
m=l*12345623133/math.sin(l/2000000)
n=m*15345631841/math.cos(f/3342333)
o=n*15546342213/math.tan(n/1000858)
p=o*17894153123*math.sin(o)
q=p*23478923489*math.cos(p)
r=q*15231861231*math.tan(q)
b=str(b)
c=str(c)
d=str(d)
e=str(e)
f=str(f)
h=str(h)
i=str(i)
j=str(j)
k=str(k)
l=str(l)
m=str(m)
n=str(n)
o=str(o)
p=str(p)
q=str(q)
r=str(r)
print('加密后:','%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s'%(b[1],c[2],d[3],e[4],f[5],h[6],i[7],j[8],k[9],l[10],m[11],n[12],o[13],p[14],q[15],r[16]))
| 22.101124 | 137 | 0.597356 | import math
import math

_ALPHABET = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
# a->'01' ... z->'26', A->'27' ... Z->'52'; digits are left as-is.  One
# translate() pass is equivalent to the original 52 chained replace() calls
# because every target is a letter and every replacement is pure digits.
_CODE_TABLE = str.maketrans({c: "%02d" % (idx + 1) for idx, c in enumerate(_ALPHABET)})


def encrypt(text):
    """Return the deterministic 16-character digest of an alphanumeric string.

    Encodes letters as two-digit numbers, runs the number through a fixed
    chain of trig transforms, then assembles one character from each
    intermediate value's string form.  Raises ValueError on characters that
    are neither ASCII letters nor digits.
    """
    a = float(text.translate(_CODE_TABLE))
    # The constants and ordering below are the cipher definition; do not edit.
    b = math.sin(a) * 9987784
    c = b / 76823235675 * math.cos(b)
    d = c * 56434344567 * math.tan(c)
    e = d / 54854545464 * math.sin(d)
    f = e * 66646464321 / math.sin(e / 1000000)
    h = f * 14457231234 / math.cos(f / 3333333)
    i = h * 15978712213 / math.tan(h / 1000414)
    j = i * 18964532189 * math.sin(i)
    k = j * 51856453153 * math.cos(j)
    l = k * 78978789898 * math.tan(k)
    m = l * 12345623133 / math.sin(l / 2000000)
    # NOTE(review): `f` (not `m`) is deliberately kept here to match the
    # original output, even though it looks like a copy-paste slip.
    n = m * 15345631841 / math.cos(f / 3342333)
    o = n * 15546342213 / math.tan(n / 1000858)
    p = o * 17894153123 * math.sin(o)
    q = p * 23478923489 * math.cos(p)
    r = q * 15231861231 * math.tan(q)
    # Character idx+1 of stage idx's repr, mirroring b[1] ... r[16].
    pieces = []
    for pos, stage in enumerate((b, c, d, e, f, h, i, j, k, l, m, n, o, p, q, r)):
        pieces.append(str(stage)[pos + 1])
    return "".join(pieces)


if __name__ == "__main__":
    print("加密后:", encrypt(input("输入一串数字或字母:")))
| true | true |
f71eedd59815c48ae102ad9122ad01d977f6d026 | 4,679 | py | Python | tests/test_typing_compat.py | PrettyWood/typingx | 881b250da6e7629dce1e71efa153a5cfbfc9a13e | [
"MIT"
] | 46 | 2021-02-01T02:06:01.000Z | 2022-03-25T12:27:12.000Z | tests/test_typing_compat.py | PrettyWood/typing-extend | 881b250da6e7629dce1e71efa153a5cfbfc9a13e | [
"MIT"
] | 13 | 2021-02-14T13:16:32.000Z | 2022-03-29T12:36:00.000Z | tests/test_typing_compat.py | PrettyWood/typingx | 881b250da6e7629dce1e71efa153a5cfbfc9a13e | [
"MIT"
] | null | null | null | import collections
import pytest
from typingx import (
Annotated,
Any,
Callable,
Collection,
Constraints,
Dict,
FrozenSet,
Generic,
List,
Listx,
Literal,
Mapping,
NewType,
NoneType,
Optional,
Sequence,
Set,
Tuple,
Tuplex,
Type,
TypedDict,
TypeVar,
Union,
get_args,
get_origin,
is_literal,
is_newtype,
is_typeddict,
)
from typingx.typing_compat import display_type
class FullMovie(TypedDict):
    """TypedDict fixture (all keys required) used by the TypedDict tests below."""

    name: str
    year: int
# Fixture type variables: S is constrained to (int, str); T and U are free.
S = TypeVar("S", int, str)
T = TypeVar("T")
U = TypeVar("U")
class StrangePair(Generic[T, S]):
    """Generic fixture with two type parameters (free T, constrained S)."""

    ...
# Each case pairs a typing form with the argument tuple get_args must return,
# covering plain types, generics, Literal flattening, the Listx/Tuplex
# extensions, typevar substitution and Annotated metadata.
@pytest.mark.parametrize(
    "tp,expected_args",
    [
        (int, ()),
        (Any, ()),
        (Dict, ()),
        (List, ()),
        (Set, ()),
        (FrozenSet, ()),
        (Type, ()),
        (Union[str, int], (str, int)),
        (Optional[str], (str, NoneType)),
        (List[int], (int,)),
        (Dict[str, int], (str, int)),
        (Set[str], (str,)),
        (Type[int], (int,)),
        (FullMovie, ()),
        (Literal["pika"], ("pika",)),
        (Literal["pika", Literal[Literal["bulbi"]]], ("pika", "bulbi")),
        (Listx[str], (str,)),
        (Listx[str, int], (str, int)),
        (Listx[str, int, ...], (str, int, ...)),
        (Listx[str, int, ..., bool], (str, int, ..., bool)),
        (Tuplex[str], (str,)),
        (Tuplex[str, int], (str, int)),
        (Tuplex[str, int, ...], (str, int, ...)),
        (Tuplex[str, int, ..., bool], (str, int, ..., bool)),
        (Sequence[int], (int,)),
        (Mapping[str, int], (str, int)),
        (StrangePair[int, str], (int, str)),
        (StrangePair, ()),
        (Callable, ()),
        (Callable[..., str], (..., str)),
        (Callable[[int], str], ([int], str)),
        (Union[int, T, str][float], (int, float, str)),
        (Callable[[], T][int], ([], int)),
        (Callable[[T], T][int], ([int], int)),
        (Callable[[T, float], U][int, str], ([int, float], str)),
        (List[Collection[T]][int], (Collection[int],)),
        (
            Mapping[T, Sequence[U]][str, int],
            (
                str,
                Sequence[int],
            ),
        ),
        (
            Mapping[str, Mapping[T, Collection[U]]][float, int],
            (
                str,
                Mapping[float, Collection[int]],
            ),
        ),
        (Annotated[int, Constraints(ge=4)], (int, Constraints(ge=4))),
        (Annotated[Union[int, float], Constraints(ge=4)], (Union[int, float], Constraints(ge=4))),
    ],
)
def test_get_args(tp, expected_args):
    """``get_args`` returns the expected argument tuple for every supported form."""
    assert get_args(tp) == expected_args
# Each case pairs a typing form with the unsubscripted origin get_origin must
# return (None for plain types and TypedDicts, Union for Optional, the
# concrete builtin/ABC for generic aliases).
@pytest.mark.parametrize(
    "tp,expected_origin",
    [
        (int, None),
        (Any, None),
        (Dict, dict),
        (List, list),
        (Set, set),
        (FrozenSet, frozenset),
        (Tuple, tuple),
        (Type, type),
        (Union[str, int], Union),
        (Optional[str], Union),
        (List[int], list),
        (Dict[str, int], dict),
        (Set[str], set),
        (Tuple[int], tuple),
        (Type[int], type),
        (FullMovie, None),
        (Literal["pika"], Literal),
        (Literal["pika", Literal[Literal["bulbi"]]], Literal),
        (Listx[str, int, ...], list),
        (Tuplex[str, int, ...], tuple),
        (Sequence[int], collections.abc.Sequence),
        (Mapping[str, int], collections.abc.Mapping),
        (Generic, Generic),
        (Generic[T], Generic),
        (Union[T, int], Union),
        (Union[T, int][str], Union),
        (List[Tuple[T, T]][int], list),
        (StrangePair[int, str], StrangePair),
        (Callable, collections.abc.Callable),
        (Callable[..., str], collections.abc.Callable),
        (Callable[[int], str], collections.abc.Callable),
        (Collection, collections.abc.Collection),
        (Collection[int], collections.abc.Collection),
        (Annotated[int, Constraints(ge=4)], Annotated),
        (Annotated[Union[int, float], Constraints(ge=4)], Annotated),
    ],
)
def test_get_origin(tp, expected_origin):
    """``get_origin`` returns the expected origin for every supported form."""
    assert get_origin(tp) == expected_origin
def test_is_literal():
    """Literal aliases are detected; plain types are not."""
    literal_alias = Literal["pika"]
    assert is_literal(literal_alias) is True
    assert is_literal(int) is False
def test_is_typeddict():
    """TypedDict subclasses are detected; the builtin dict is not."""
    for candidate, expected in ((FullMovie, True), (dict, False)):
        assert is_typeddict(candidate) is expected
def test_is_newtype():
    """NewType wrappers are detected; their underlying type is not."""
    user_id_type = NewType("UserId", int)
    assert is_newtype(user_id_type) is True
    assert is_newtype(int) is False
def test_display_type():
    """display_type renders readable names for plain, generic and annotated types."""
    cases = [
        (int, "int"),
        (List[Dict[str, str]], "List[Dict[str, str]]"),
        (Annotated[int, 3], "Annotated[int, 3]"),
    ]
    for tp, expected in cases:
        assert display_type(tp) == expected
| 26.435028 | 98 | 0.518059 | import collections
import pytest
from typingx import (
Annotated,
Any,
Callable,
Collection,
Constraints,
Dict,
FrozenSet,
Generic,
List,
Listx,
Literal,
Mapping,
NewType,
NoneType,
Optional,
Sequence,
Set,
Tuple,
Tuplex,
Type,
TypedDict,
TypeVar,
Union,
get_args,
get_origin,
is_literal,
is_newtype,
is_typeddict,
)
from typingx.typing_compat import display_type
class FullMovie(TypedDict):
name: str
year: int
S = TypeVar("S", int, str)
T = TypeVar("T")
U = TypeVar("U")
class StrangePair(Generic[T, S]):
...
@pytest.mark.parametrize(
"tp,expected_args",
[
(int, ()),
(Any, ()),
(Dict, ()),
(List, ()),
(Set, ()),
(FrozenSet, ()),
(Type, ()),
(Union[str, int], (str, int)),
(Optional[str], (str, NoneType)),
(List[int], (int,)),
(Dict[str, int], (str, int)),
(Set[str], (str,)),
(Type[int], (int,)),
(FullMovie, ()),
(Literal["pika"], ("pika",)),
(Literal["pika", Literal[Literal["bulbi"]]], ("pika", "bulbi")),
(Listx[str], (str,)),
(Listx[str, int], (str, int)),
(Listx[str, int, ...], (str, int, ...)),
(Listx[str, int, ..., bool], (str, int, ..., bool)),
(Tuplex[str], (str,)),
(Tuplex[str, int], (str, int)),
(Tuplex[str, int, ...], (str, int, ...)),
(Tuplex[str, int, ..., bool], (str, int, ..., bool)),
(Sequence[int], (int,)),
(Mapping[str, int], (str, int)),
(StrangePair[int, str], (int, str)),
(StrangePair, ()),
(Callable, ()),
(Callable[..., str], (..., str)),
(Callable[[int], str], ([int], str)),
(Union[int, T, str][float], (int, float, str)),
(Callable[[], T][int], ([], int)),
(Callable[[T], T][int], ([int], int)),
(Callable[[T, float], U][int, str], ([int, float], str)),
(List[Collection[T]][int], (Collection[int],)),
(
Mapping[T, Sequence[U]][str, int],
(
str,
Sequence[int],
),
),
(
Mapping[str, Mapping[T, Collection[U]]][float, int],
(
str,
Mapping[float, Collection[int]],
),
),
(Annotated[int, Constraints(ge=4)], (int, Constraints(ge=4))),
(Annotated[Union[int, float], Constraints(ge=4)], (Union[int, float], Constraints(ge=4))),
],
)
def test_get_args(tp, expected_args):
assert get_args(tp) == expected_args
@pytest.mark.parametrize(
"tp,expected_origin",
[
(int, None),
(Any, None),
(Dict, dict),
(List, list),
(Set, set),
(FrozenSet, frozenset),
(Tuple, tuple),
(Type, type),
(Union[str, int], Union),
(Optional[str], Union),
(List[int], list),
(Dict[str, int], dict),
(Set[str], set),
(Tuple[int], tuple),
(Type[int], type),
(FullMovie, None),
(Literal["pika"], Literal),
(Literal["pika", Literal[Literal["bulbi"]]], Literal),
(Listx[str, int, ...], list),
(Tuplex[str, int, ...], tuple),
(Sequence[int], collections.abc.Sequence),
(Mapping[str, int], collections.abc.Mapping),
(Generic, Generic),
(Generic[T], Generic),
(Union[T, int], Union),
(Union[T, int][str], Union),
(List[Tuple[T, T]][int], list),
(StrangePair[int, str], StrangePair),
(Callable, collections.abc.Callable),
(Callable[..., str], collections.abc.Callable),
(Callable[[int], str], collections.abc.Callable),
(Collection, collections.abc.Collection),
(Collection[int], collections.abc.Collection),
(Annotated[int, Constraints(ge=4)], Annotated),
(Annotated[Union[int, float], Constraints(ge=4)], Annotated),
],
)
def test_get_origin(tp, expected_origin):
assert get_origin(tp) == expected_origin
def test_is_literal():
assert is_literal(Literal["pika"]) is True
assert is_literal(int) is False
def test_is_typeddict():
assert is_typeddict(FullMovie) is True
assert is_typeddict(dict) is False
def test_is_newtype():
UserId = NewType("UserId", int)
assert is_newtype(UserId) is True
assert is_newtype(int) is False
def test_display_type():
assert display_type(int) == "int"
assert display_type(List[Dict[str, str]]) == "List[Dict[str, str]]"
assert display_type(Annotated[int, 3]) == "Annotated[int, 3]"
| true | true |
f71eef5e3b50cc1a621eae8d1fbd631562a1ba66 | 11,433 | py | Python | examples/roberta_coarsness_NER_CRF_train.py | 6666ev/bert_seq2seq | caa9b6c5629ae5783c733aebbbcf669d8ab5dde2 | [
"Apache-2.0"
] | 795 | 2020-03-13T10:16:26.000Z | 2022-03-30T02:07:57.000Z | examples/roberta_coarsness_NER_CRF_train.py | 6666ev/bert_seq2seq | caa9b6c5629ae5783c733aebbbcf669d8ab5dde2 | [
"Apache-2.0"
] | 51 | 2020-03-23T09:09:45.000Z | 2022-03-24T02:44:53.000Z | examples/roberta_coarsness_NER_CRF_train.py | 6666ev/bert_seq2seq | caa9b6c5629ae5783c733aebbbcf669d8ab5dde2 | [
"Apache-2.0"
] | 152 | 2020-04-01T04:37:18.000Z | 2022-03-31T03:22:14.000Z | ## 粗粒度ner加crf层的例子
import torch
from tqdm import tqdm
import unicodedata
import os
import time
from torch.utils.data import Dataset, DataLoader
from bert_seq2seq import Tokenizer, load_chinese_base_vocab
from bert_seq2seq import load_bert
# Training configuration.
data_path = "./state_dict/corase_train_update.txt"  # corpus file, one "token label" pair per line
vocab_path = "./state_dict/roberta_wwm_vocab.txt"  # location of the RoBERTa vocabulary file
model_name = "roberta"  # which model architecture to load
model_path = "./state_dict/roberta_wwm_pytorch_model.bin"  # location of the pretrained RoBERTa weights
recent_model_path = ""  # used to resume training from an already-trained model
model_save_path = "./bert_粗粒度ner_crf.bin"
batch_size = 4
lr = 1e-5
word2idx = load_chinese_base_vocab(vocab_path)  # token -> id vocabulary mapping
target = ["O", "B-LOC", "I-LOC", "B-PER", "I-PER", "B-ORG", "I-ORG"]  # BIO tag set (LOC/PER/ORG)
def _is_punctuation(ch):
"""标点符号类字符判断(全/半角均在此内)
"""
code = ord(ch)
return 33 <= code <= 47 or \
58 <= code <= 64 or \
91 <= code <= 96 or \
123 <= code <= 126 or \
unicodedata.category(ch).startswith('P')
def _cjk_punctuation():
    """Return a string of common CJK (full-width) punctuation characters."""
    return u'\uff02\uff03\uff04\uff05\uff06\uff07\uff08\uff09\uff0a\uff0b\uff0c\uff0d\uff0f\uff1a\uff1b\uff1c\uff1d\uff1e\uff20\uff3b\uff3c\uff3d\uff3e\uff3f\uff40\uff5b\uff5c\uff5d\uff5e\uff5f\uff60\uff62\uff63\uff64\u3000\u3001\u3003\u3008\u3009\u300a\u300b\u300c\u300d\u300e\u300f\u3010\u3011\u3014\u3015\u3016\u3017\u3018\u3019\u301a\u301b\u301c\u301d\u301e\u301f\u3030\u303e\u303f\u2013\u2014\u2018\u2019\u201b\u201c\u201d\u201e\u201f\u2026\u2027\ufe4f\ufe51\ufe54\xb7\uff01\uff1f\uff61\u3002'
def _is_cjk_character(ch):
"""CJK类字符判断(包括中文字符也在此列)
参考:https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
"""
code = ord(ch)
return 0x4E00 <= code <= 0x9FFF or \
0x3400 <= code <= 0x4DBF or \
0x20000 <= code <= 0x2A6DF or \
0x2A700 <= code <= 0x2B73F or \
0x2B740 <= code <= 0x2B81F or \
0x2B820 <= code <= 0x2CEAF or \
0xF900 <= code <= 0xFAFF or \
0x2F800 <= code <= 0x2FA1F
def _is_control(ch):
"""控制类字符判断
"""
return unicodedata.category(ch) in ('Cc', 'Cf')
def word_piece_tokenize(word):
    """Greedy longest-match WordPiece split against the global ``word2idx``.

    A word present in the vocabulary is returned whole.  Otherwise the longest
    known prefix is consumed repeatedly, with non-initial pieces prefixed by
    ``##``.  When not even a single character matches, that one character is
    still emitted as a (possibly out-of-vocabulary) piece.
    """
    if word in word2idx:
        return [word]
    pieces = []
    cursor = 0
    length = len(word)
    while cursor < length:
        end = length
        piece = None
        while end > cursor:
            candidate = word[cursor:end]
            if cursor > 0:
                candidate = '##' + candidate
            piece = candidate
            if candidate in word2idx:
                break
            end -= 1
        if end == cursor:
            # No prefix matched: fall back to the single leading character.
            end += 1
        pieces.append(piece)
        cursor = end
    return pieces
def read_corpus(data_path):
    """Read a BIO-tagged corpus file.

    The file holds one "token label" pair per line, with sentences separated
    by blank lines.  Sentences of 300 or more characters are skipped (BERT
    input-length limit); empty sentences (consecutive blank lines) are
    skipped as well.

    Args:
        data_path: path to the UTF-8 corpus file.

    Returns:
        (sents_src, sents_tgt): parallel lists of sentence strings and their
        per-character label lists.
    """
    sents_src = []
    sents_tgt = []

    def _flush(row, tags):
        # Keep only non-empty, reasonably short sentences.
        if row and len(row) < 300:
            sents_src.append(row)
            sents_tgt.append(tags)

    with open(data_path, encoding="utf-8") as f:
        row = ""
        tags = []
        for line in f:
            if line == "\n":
                _flush(row, tags)
                row = ""
                tags = []
                continue
            parts = line.split(" ")
            row = row + parts[0]
            tags.append(parts[1].strip("\n"))
    # Bug fix: the original silently dropped the final sentence when the file
    # did not end with a blank line.
    _flush(row, tags)
    return sents_src, sents_tgt
## 自定义dataset
class NERDataset(Dataset):
    """Dataset wrapping pre-read (sentence, label-sequence) pairs.

    Each item is tokenized with the global ``word2idx`` vocabulary; the label
    sequence is padded with "O" at both ends to line up with the [CLS]/[SEP]
    tokens added by the tokenizer.
    """

    def __init__(self, sents_src, sents_tgt):
        super(NERDataset, self).__init__()
        self.sents_src = sents_src
        self.sents_tgt = sents_tgt
        self.idx2word = {k: v for v, k in word2idx.items()}
        self.tokenizer = Tokenizer(word2idx)

    def __getitem__(self, i):
        """Return one tokenized sample as a dict of id lists."""
        src = self.sents_src[i]
        # Pad the label sequence for the [CLS] and [SEP] positions.
        tgt = ["O"] + self.sents_tgt[i] + ["O"]
        target_ids = [target.index(label) for label in tgt]
        token_ids, token_type_ids = self.tokenizer.encode(src)
        if len(token_ids) != len(target_ids):
            # Bug fix: the original called os._exit(0), which killed the whole
            # process (with a *success* exit code) from inside a DataLoader
            # worker.  Raise instead, so the error surfaces to the caller.
            raise ValueError(
                "token/label length mismatch for sample %d: %d tokens vs %d labels"
                % (i, len(token_ids), len(target_ids))
            )
        return {
            "token_ids": token_ids,
            "token_type_ids": token_type_ids,
            "target_id": target_ids,
        }

    def __len__(self):
        return len(self.sents_src)
def collate_fn(batch):
    """Dynamically pad a batch of samples to its longest token sequence.

    Args:
        batch: list of dicts with "token_ids", "token_type_ids", "target_id".

    Returns:
        Three tensors of shape (batch, max_len), zero-padded on the right.
    """
    max_length = max(len(sample["token_ids"]) for sample in batch)

    def _padded_tensor(key, pad_idx=0):
        # Right-pad every row of *key* up to max_length with pad_idx.
        rows = [
            sample[key] + [pad_idx] * max(0, max_length - len(sample[key]))
            for sample in batch
        ]
        return torch.tensor(rows)

    return (
        _padded_tensor("token_ids"),
        _padded_tensor("token_type_ids"),
        _padded_tensor("target_id"),
    )
def viterbi_decode(nodes, trans):
    """Viterbi decoding over emission scores.

    Args:
        nodes: (seq_len, target_size) emission scores.
        trans: (target_size, target_size) transition scores, trans[i, j]
            scoring a move from tag i to tag j.

    Returns:
        1-D tensor of length seq_len holding the best tag path.
    """
    # Bug fix: clone before the in-place subtraction -- the original bound
    # `scores` to the nodes[0] *view*, so `scores[1:] -= 100000` corrupted
    # the caller's tensor.
    scores = nodes[0].clone()
    scores[1:] -= 100000  # force the first tag to be "O" (index 0)
    target_size = nodes.shape[1]
    seq_len = nodes.shape[0]
    labels = torch.arange(0, target_size).view(1, -1)
    path = labels
    for step in range(1, seq_len):
        scores = scores.view(-1, 1)
        M = scores + trans + nodes[step].view(1, -1)
        scores, ids = M.max(0)
        # Keep, for every current tag, the best path that ends in it.
        path = torch.cat((path[:, ids], labels), dim=0)
    return path[:, scores.argmax()]
def ner_print(model, test_data, device="cpu"):
    """Run NER over *test_data* sentences and print the extracted entities.

    Decodes with Viterbi using the model's learned CRF transition matrix,
    then groups consecutive B-/I- tags into entity strings keyed by type.
    """
    model.eval()
    idxtword = {v: k for k, v in word2idx.items()}
    tokenier = Tokenizer(word2idx)
    trans = model.state_dict()["crf_layer.trans"]
    for text in test_data:
        decode = []
        text_encode, text_ids = tokenier.encode(text)
        text_tensor = torch.tensor(text_encode, device=device).view(1, -1)
        out = model(text_tensor).squeeze(0)  # emission scores (the "nodes")
        labels = viterbi_decode(out, trans)
        starting = False
        # Turn the tag-id sequence into per-token entity-type strings.
        for l in labels:
            if l > 0:
                label = target[l.item()]
                if label[0] == "B":
                    decode.append(label[2: ])
                    starting = True
                elif starting:
                    decode.append(label[2: ])
                else:
                    # An I- tag without a preceding B- tag is treated as "O".
                    starting = False
                    decode.append("O")
            else :
                decode.append("O")
        flag = 0
        res = {}
        text_decode = [idxtword[i] for i in text_encode]
        # Group consecutive identical entity types into entity strings.
        for index, each_entity in enumerate(decode):
            if each_entity != "O":
                if flag != each_entity:
                    # First character of a new entity span.
                    cur_text = text_decode[index]
                    if each_entity in res.keys():
                        res[each_entity].append(cur_text)
                    else :
                        res[each_entity] = [cur_text]
                    flag = each_entity
                elif flag == each_entity:
                    # Continuation of the current entity span.
                    res[each_entity][-1] += text_decode[index]
                else :
                    flag = 0
        print(res)
class Trainer:
    """End-to-end training driver: loads data, model and optimizer, runs epochs."""

    def __init__(self):
        # Load the training data.
        self.sents_src, self.sents_tgt = read_corpus(data_path)
        self.tokenier = Tokenizer(word2idx)
        # Pick a GPU when available, otherwise the CPU.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print("device: " + str(self.device))
        # Build the model (BERT encoder + CRF sequence-labeling head).
        self.bert_model = load_bert(word2idx, model_name=model_name, model_class="sequence_labeling_crf", target_size=len(target))
        # Load the pretrained model weights.
        self.bert_model.load_pretrain_params(model_path)
        # Move the model to the compute device (GPU or CPU).
        self.bert_model.set_device(self.device)
        # Parameters to optimize.
        self.optim_parameters = list(self.bert_model.parameters())
        self.optimizer = torch.optim.Adam(self.optim_parameters, lr=lr, weight_decay=1e-3)
        # Custom data loader with dynamic padding.
        dataset = NERDataset(self.sents_src, self.sents_tgt)
        self.dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)

    def train(self, epoch):
        # Run one training epoch.
        self.bert_model.train()
        self.iteration(epoch, dataloader=self.dataloader, train=True)

    def save(self, save_path):
        """
        Save all model parameters to *save_path*.
        """
        self.bert_model.save_all_params(save_path)
        print("{} saved!".format(save_path))

    def iteration(self, epoch, dataloader, train=True):
        """Run one pass over *dataloader*; backprop when *train* is True."""
        total_loss = 0
        start_time = time.time()  # epoch start time
        step = 0
        for token_ids, token_type_ids, target_ids in tqdm(dataloader,position=0, leave=True):
            step += 1
            if step % 500 == 0:
                # Periodically show predictions on a few sample sentences.
                test_data = ["日寇在京掠夺文物详情。", "以书结缘,把欧美,港台流行的食品类食谱汇集一堂。", "明天天津下雨,不知道主任还能不能来学校吃个饭。"]
                ner_print(self.bert_model, test_data, device=self.device)
                self.bert_model.train()

            # Labels are passed in, so the model computes and returns the loss.
            predictions, loss = self.bert_model(token_ids,
                                                labels=target_ids
                                                )
            # Backpropagation.
            if train:
                # Clear gradients from the previous step.
                self.optimizer.zero_grad()
                # Backpropagate to obtain fresh gradients.
                loss.backward()
                # Update the model parameters with the new gradients.
                self.optimizer.step()

            # Accumulate for the epoch-average loss.
            total_loss += loss.item()

        end_time = time.time()
        spend_time = end_time - start_time
        # Report training progress.
        print("epoch is " + str(epoch)+". loss is " + str(total_loss) + ". spend time is "+ str(spend_time))
        # Save the model after every epoch.
        self.save(model_save_path)
if __name__ == '__main__':
    trainer = Trainer()
    train_epoches = 25
    for epoch in range(train_epoches):
        # Train for one epoch (the model is saved at the end of each epoch).
        trainer.train(epoch)
# with open("./state_dict/corase_train_update.txt", "a+") as f:
# with open("./corpus/粗粒度NER/人民日报ner数据.txt", "r", encoding="utf-8") as f1 :
# lines = f1.readlines()
# start = 1
# string = ""
# label = ""
# for line in lines:
# if line == "\n":
# f.write("\n")
# continue
# line = line.strip("\n")
# line = line.split(" ")
# if _is_punctuation(line[0]) or _is_cjk_character(line[0]):
# if string != "":
# string = string.lower()
# tokens = word_piece_tokenize(string) # 子词
# for t in tokens:
# if "##" in t:
# f.write(t[2:] + " " + label + "\n")
# else :
# f.write(t + " " + label + "\n")
# # f.write(string + " " + label + "\n")
# string = ""
# label = ""
# f.write(line[0] + " " + line[1] + "\n")
# else :
# string += line[0]
# label = line[1] | 33.332362 | 498 | 0.542202 | om tqdm import tqdm
import unicodedata
import os
import time
from torch.utils.data import Dataset, DataLoader
from bert_seq2seq import Tokenizer, load_chinese_base_vocab
from bert_seq2seq import load_bert
data_path = "./state_dict/corase_train_update.txt"
vocab_path = "./state_dict/roberta_wwm_vocab.txt"
model_name = "roberta"
model_path = "./state_dict/roberta_wwm_pytorch_model.bin"
recent_model_path = ""
model_save_path = "./bert_粗粒度ner_crf.bin"
batch_size = 4
lr = 1e-5
word2idx = load_chinese_base_vocab(vocab_path)
target = ["O", "B-LOC", "I-LOC", "B-PER", "I-PER", "B-ORG", "I-ORG"]
def _is_punctuation(ch):
code = ord(ch)
return 33 <= code <= 47 or \
58 <= code <= 64 or \
91 <= code <= 96 or \
123 <= code <= 126 or \
unicodedata.category(ch).startswith('P')
def _cjk_punctuation():
return u'\uff02\uff03\uff04\uff05\uff06\uff07\uff08\uff09\uff0a\uff0b\uff0c\uff0d\uff0f\uff1a\uff1b\uff1c\uff1d\uff1e\uff20\uff3b\uff3c\uff3d\uff3e\uff3f\uff40\uff5b\uff5c\uff5d\uff5e\uff5f\uff60\uff62\uff63\uff64\u3000\u3001\u3003\u3008\u3009\u300a\u300b\u300c\u300d\u300e\u300f\u3010\u3011\u3014\u3015\u3016\u3017\u3018\u3019\u301a\u301b\u301c\u301d\u301e\u301f\u3030\u303e\u303f\u2013\u2014\u2018\u2019\u201b\u201c\u201d\u201e\u201f\u2026\u2027\ufe4f\ufe51\ufe54\xb7\uff01\uff1f\uff61\u3002'
def _is_cjk_character(ch):
code = ord(ch)
return 0x4E00 <= code <= 0x9FFF or \
0x3400 <= code <= 0x4DBF or \
0x20000 <= code <= 0x2A6DF or \
0x2A700 <= code <= 0x2B73F or \
0x2B740 <= code <= 0x2B81F or \
0x2B820 <= code <= 0x2CEAF or \
0xF900 <= code <= 0xFAFF or \
0x2F800 <= code <= 0x2FA1F
def _is_control(ch):
return unicodedata.category(ch) in ('Cc', 'Cf')
def word_piece_tokenize(word):
if word in word2idx:
return [word]
tokens = []
start, stop = 0, 0
while start < len(word):
stop = len(word)
while stop > start:
sub = word[start:stop]
if start > 0:
sub = '##' + sub
if sub in word2idx:
break
stop -= 1
if start == stop:
stop += 1
tokens.append(sub)
start = stop
return tokens
def read_corpus(data_path):
sents_src = []
sents_tgt = []
with open(data_path, encoding="utf-8") as f:
lines = f.readlines()
row = ""
t = []
for line in lines:
if line == "\n":
if len(row) < 300:
sents_src.append(row)
sents_tgt.append(t)
row = ""
t = []
continue
line = line.split(" ")
row = row + line[0]
t.append(line[1].strip("\n"))
return sents_src, sents_tgt
taset(Dataset):
def __init__(self, sents_src, sents_tgt) :
RDataset, self).__init__()
self.sents_src = sents_src
self.sents_tgt = sents_tgt
self.idx2word = {k: v for v, k in word2idx.items()}
self.tokenizer = Tokenizer(word2idx)
def __getitem__(self, i):
src = self.sents_src[i]
tgt = self.sents_tgt[i]
tgt = ["O"] + tgt + ["O"]
tgt = [target.index(i) for i in tgt ]
token_ids, token_type_ids = self.tokenizer.encode(src)
if len(token_ids) != len(tgt):
print("not equal")
os._exit(0)
output = {
"token_ids": token_ids,
"token_type_ids": token_type_ids,
"target_id": tgt
}
return output
def __len__(self):
return len(self.sents_src)
def collate_fn(batch):
def padding(indice, max_length, pad_idx=0):
pad_indice = [item + [pad_idx] * max(0, max_length - len(item)) for item in indice]
return torch.tensor(pad_indice)
token_ids = [data["token_ids"] for data in batch]
max_length = max([len(t) for t in token_ids])
token_type_ids = [data["token_type_ids"] for data in batch]
target_ids = [data["target_id"] for data in batch]
token_ids_padded = padding(token_ids, max_length)
token_type_ids_padded = padding(token_type_ids, max_length)
target_ids_padded = padding(target_ids, max_length)
return token_ids_padded, token_type_ids_padded, target_ids_padded
def viterbi_decode(nodes, trans):
scores = nodes[0]
scores[1:] -= 100000
target_size = nodes.shape[1]
seq_len = nodes.shape[0]
labels = torch.arange(0, target_size).view(1, -1)
path = labels
for l in range(1, seq_len):
scores = scores.view(-1, 1)
M = scores + trans + nodes[l].view(1, -1)
scores, ids = M.max(0)
path = torch.cat((path[:, ids], labels), dim=0)
return path[:, scores.argmax()]
def ner_print(model, test_data, device="cpu"):
model.eval()
idxtword = {v: k for k, v in word2idx.items()}
tokenier = Tokenizer(word2idx)
trans = model.state_dict()["crf_layer.trans"]
for text in test_data:
decode = []
text_encode, text_ids = tokenier.encode(text)
text_tensor = torch.tensor(text_encode, device=device).view(1, -1)
out = model(text_tensor).squeeze(0)
labels = viterbi_decode(out, trans)
starting = False
for l in labels:
if l > 0:
label = target[l.item()]
if label[0] == "B":
decode.append(label[2: ])
starting = True
elif starting:
decode.append(label[2: ])
else:
starting = False
decode.append("O")
else :
decode.append("O")
flag = 0
res = {}
text_decode = [idxtword[i] for i in text_encode]
for index, each_entity in enumerate(decode):
if each_entity != "O":
if flag != each_entity:
cur_text = text_decode[index]
if each_entity in res.keys():
res[each_entity].append(cur_text)
else :
res[each_entity] = [cur_text]
flag = each_entity
elif flag == each_entity:
res[each_entity][-1] += text_decode[index]
else :
flag = 0
print(res)
class Trainer:
def __init__(self):
self.sents_src, self.sents_tgt = read_corpus(data_path)
self.tokenier = Tokenizer(word2idx)
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("device: " + str(self.device))
self.bert_model = load_bert(word2idx, model_name=model_name, model_class="sequence_labeling_crf", target_size=len(target))
.bert_model.load_pretrain_params(model_path)
self.bert_model.set_device(self.device)
self.optim_parameters = list(self.bert_model.parameters())
self.optimizer = torch.optim.Adam(self.optim_parameters, lr=lr, weight_decay=1e-3)
dataset = NERDataset(self.sents_src, self.sents_tgt)
self.dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
def train(self, epoch):
self.bert_model.train()
self.iteration(epoch, dataloader=self.dataloader, train=True)
def save(self, save_path):
self.bert_model.save_all_params(save_path)
print("{} saved!".format(save_path))
def iteration(self, epoch, dataloader, train=True):
total_loss = 0
start_time = time.time() step = 0
for token_ids, token_type_ids, target_ids in tqdm(dataloader,position=0, leave=True):
step += 1
if step % 500 == 0:
test_data = ["日寇在京掠夺文物详情。", "以书结缘,把欧美,港台流行的食品类食谱汇集一堂。", "明天天津下雨,不知道主任还能不能来学校吃个饭。"]
ner_print(self.bert_model, test_data, device=self.device)
self.bert_model.train()
predictions, loss = self.bert_model(token_ids,
labels=target_ids
)
if train:
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
total_loss += loss.item()
end_time = time.time()
spend_time = end_time - start_time
print("epoch is " + str(epoch)+". loss is " + str(total_loss) + ". spend time is "+ str(spend_time))
self.save(model_save_path)
if __name__ == '__main__':
trainer = Trainer()
train_epoches = 25
for epoch in range(train_epoches):
trainer.train(epoch)
| true | true |
f71ef1685e3a5f1c9663866f64524d16649aeb78 | 3,206 | py | Python | msgraph_async/common/tests/test_odata_query.py | blbuford/msgraph-async | c368fbef230a45d173e260775a693df456e60aa9 | [
"MIT"
] | 5 | 2021-02-28T08:54:09.000Z | 2022-03-21T16:00:35.000Z | msgraph_async/common/tests/test_odata_query.py | blbuford/msgraph-async | c368fbef230a45d173e260775a693df456e60aa9 | [
"MIT"
] | 1 | 2021-04-01T16:16:11.000Z | 2021-04-01T16:16:11.000Z | msgraph_async/common/tests/test_odata_query.py | blbuford/msgraph-async | c368fbef230a45d173e260775a693df456e60aa9 | [
"MIT"
] | 1 | 2021-03-31T22:50:19.000Z | 2021-03-31T22:50:19.000Z | import unittest
from msgraph_async.common.odata_query import *
class TestODataQuery(unittest.TestCase):
    """Unit tests for ODataQuery: property validation and query-string rendering."""

    def setUp(self):
        pass

    @classmethod
    def setUpClass(cls):
        pass

    def get_instance(self):
        """Return a fresh, empty ODataQuery instance."""
        return ODataQuery()

    def test_empty_odata(self):
        i = self.get_instance()
        self.assertEqual("EMPTY OPEN DATA QUERY", str(i))

    # Idiom fix throughout: the try / self.fail() / except ValueError pattern
    # is replaced with the assertRaises context manager, which also fails
    # when no exception is raised and reports a clearer message.
    def test_count_set_bad_value(self):
        i = self.get_instance()
        with self.assertRaises(ValueError):
            i.count = 1

    def test_count_set(self):
        i = self.get_instance()
        i.count = True

    def test_expand_set_bad_value(self):
        i = self.get_instance()
        with self.assertRaises(ValueError):
            i.expand = 1

    def test_expand_set(self):
        i = self.get_instance()
        i.expand = "groups"

    def test_filter_set_bad_value(self):
        i = self.get_instance()
        with self.assertRaises(ValueError):
            i.filter = 1

    def test_select_set_bad_value(self):
        i = self.get_instance()
        with self.assertRaises(ValueError):
            i.select = 1

    def test_select_set_bad_value2(self):
        i = self.get_instance()
        # A list mixing valid and invalid element types must be rejected.
        with self.assertRaises(ValueError):
            i.select = ["valid", 10]

    def test_select_set(self):
        i = self.get_instance()
        i.select = ["firstName", "lastName"]

    def test_top_set_bad_value(self):
        i = self.get_instance()
        with self.assertRaises(ValueError):
            i.top = "10"

    def test_top_set(self):
        i = self.get_instance()
        i.top = 10

    def test_top_query(self):
        i = self.get_instance()
        i.top = 10
        self.assertEqual(str(i), "?$top=10")

    def test_top_and_select_query(self):
        i = self.get_instance()
        i.top = 10
        i.select = ["subject", "sender"]
        self.assertEqual(str(i), "?$select=subject,sender&$top=10")

    def test_filter_query(self):
        i = self.get_instance()
        constrain1 = Constrain("city", LogicalOperator.NE, "New-York")
        constrain2 = Constrain("displayName", LogicalOperator.EQ, "Noam Meirovitch")
        f = Filter([constrain1, constrain2], LogicalConnector.OR)
        i.filter = f
        self.assertEqual("?$filter=city ne New-York or displayName eq Noam Meirovitch", str(i))

    def test_count_expand_filter_select_top_query(self):
        i = self.get_instance()
        constrain1 = Constrain("city", LogicalOperator.NE, "New-York")
        constrain2 = Constrain("displayName", LogicalOperator.EQ, "Noam Meirovitch")
        f = Filter([constrain1, constrain2], LogicalConnector.OR)
        i.count = True
        i.expand = "groups"
        i.filter = f
        i.top = 15
        i.select = ["displayName", "firstName", "lastName"]
        self.assertEqual("?$count=true&$expand=groups&$filter=city ne New-York or displayName eq Noam Meirovitch&$select=displayName,firstName,lastName&$top=15", str(i))
| 26.278689 | 169 | 0.577979 | import unittest
from msgraph_async.common.odata_query import *
class TestODataQuery(unittest.TestCase):
def setUp(self):
pass
@classmethod
def setUpClass(cls):
pass
def get_instance(self):
return ODataQuery()
def test_empty_odata(self):
i = self.get_instance()
self.assertEqual("EMPTY OPEN DATA QUERY", str(i))
def test_count_set_bad_value(self):
i = self.get_instance()
try:
i.count = 1
self.fail()
except ValueError:
pass
def test_count_set(self):
i = self.get_instance()
i.count = True
def test_expand_set_bad_value(self):
i = self.get_instance()
try:
i.expand = 1
self.fail()
except ValueError:
pass
def test_expand_set(self):
i = self.get_instance()
i.expand = "groups"
def test_filter_set_bad_value(self):
i = self.get_instance()
try:
i.filter = 1
self.fail()
except ValueError:
pass
def test_select_set_bad_value(self):
i = self.get_instance()
try:
i.select = 1
self.fail()
except ValueError:
pass
def test_select_set_bad_value2(self):
i = self.get_instance()
try:
i.select = ["valid", 10]
self.fail()
except ValueError:
pass
def test_select_set(self):
i = self.get_instance()
i.select = ["firstName", "lastName"]
def test_top_set_bad_value(self):
i = self.get_instance()
try:
i.top = "10"
self.fail()
except ValueError:
pass
def test_top_set(self):
i = self.get_instance()
i.top = 10
def test_top_query(self):
i = self.get_instance()
i.top = 10
self.assertEqual(str(i), "?$top=10")
def test_top_and_select_query(self):
i = self.get_instance()
i.top = 10
i.select = ["subject", "sender"]
self.assertEqual(str(i), "?$select=subject,sender&$top=10")
def test_filter_query(self):
i = self.get_instance()
constrain1 = Constrain("city", LogicalOperator.NE, "New-York")
constrain2 = Constrain("displayName", LogicalOperator.EQ, "Noam Meirovitch")
f = Filter([constrain1, constrain2], LogicalConnector.OR)
i.filter = f
self.assertEqual("?$filter=city ne New-York or displayName eq Noam Meirovitch", str(i))
def test_count_expand_filter_select_top_query(self):
i = self.get_instance()
constrain1 = Constrain("city", LogicalOperator.NE, "New-York")
constrain2 = Constrain("displayName", LogicalOperator.EQ, "Noam Meirovitch")
f = Filter([constrain1, constrain2], LogicalConnector.OR)
i.count = True
i.expand = "groups"
i.filter = f
i.top = 15
i.select = ["displayName", "firstName", "lastName"]
self.assertEqual("?$count=true&$expand=groups&$filter=city ne New-York or displayName eq Noam Meirovitch&$select=displayName,firstName,lastName&$top=15", str(i))
| true | true |
f71ef27549495f887aeb09772634895ce12eecb3 | 3,612 | py | Python | src/backend/settings.py | HectorPulido/nlu-brain-api | 1e0a3adbb7401b717d7b6ac13de75c761d1a87cf | [
"MIT"
] | 3 | 2021-09-14T17:38:55.000Z | 2022-01-02T06:06:57.000Z | src/backend/settings.py | HectorPulido/nlu-brain-api | 1e0a3adbb7401b717d7b6ac13de75c761d1a87cf | [
"MIT"
] | null | null | null | src/backend/settings.py | HectorPulido/nlu-brain-api | 1e0a3adbb7401b717d7b6ac13de75c761d1a87cf | [
"MIT"
] | null | null | null | """
Django settings for backend project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
import dj_database_url
from pathlib import Path
from chatbot.nlu_engine import NLUEngine
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("SECRET_KEY", default="foo")  # NOTE(review): weak fallback "foo" -- ensure SECRET_KEY is always set in production

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = int(os.environ.get("DEBUG", default=1))  # defaults to debug ON; set DEBUG=0 in production

ALLOWED_HOSTS = ["*"]  # NOTE(review): wildcard disables host-header validation -- restrict in production
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"backend",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "backend.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "frontend/build")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "backend.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# Local/default database: sqlite file next to the project.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": BASE_DIR / "db.sqlite3",
    }
}

# Override the sqlite default with DATABASE_URL when provided (e.g. a hosted
# Postgres URL).  When DATABASE_URL is unset, dj_database_url returns an
# empty config and the sqlite settings above remain in effect.
DATABASE_URL = os.environ.get("DATABASE_URL")
db_from_env = dj_database_url.config(
    default=DATABASE_URL, conn_max_age=500, ssl_require=True
)
DATABASES["default"].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
APPEND_SLASH = False
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
STATIC_URL = "/static/"
CHATBOT = NLUEngine(
engine_path="engine.nlu", dataset_path="dataset.yaml", language="es"
)
| 26.364964 | 91 | 0.712071 |
import os
import dj_database_url
from pathlib import Path
from chatbot.nlu_engine import NLUEngine
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = os.environ.get("SECRET_KEY", default="foo")
DEBUG = int(os.environ.get("DEBUG", default=1))
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"backend",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "backend.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "frontend/build")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "backend.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
DATABASE_URL = os.environ.get("DATABASE_URL")
db_from_env = dj_database_url.config(
default=DATABASE_URL, conn_max_age=500, ssl_require=True
)
DATABASES["default"].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
APPEND_SLASH = False
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
STATIC_URL = "/static/"
CHATBOT = NLUEngine(
engine_path="engine.nlu", dataset_path="dataset.yaml", language="es"
)
| true | true |
f71ef33a954ffb0394694494df32c893a8f5ed2a | 3,335 | py | Python | scripts/releaser_hooks.py | jmelloy/django-photologue | b0a3072e331cdbf2240db0b8a5a91f31c04fee64 | [
"BSD-3-Clause"
] | 1 | 2021-01-21T15:25:20.000Z | 2021-01-21T15:25:20.000Z | scripts/releaser_hooks.py | jmelloy/django-photologue | b0a3072e331cdbf2240db0b8a5a91f31c04fee64 | [
"BSD-3-Clause"
] | null | null | null | scripts/releaser_hooks.py | jmelloy/django-photologue | b0a3072e331cdbf2240db0b8a5a91f31c04fee64 | [
"BSD-3-Clause"
] | null | null | null | import os
import subprocess
try:
import polib
except ImportError:
print('Msg to the package releaser: prerelease hooks will not work as you have not installed polib.')
raise
import copy
import codecs
def prereleaser_before(data):
    """Zest.releaser hook executed before a release is cut.

    1. Run the test suite, flake8 and a migrations check one last time.
    2. Regenerate CONTRIBUTORS.txt from the git history and the .po files,
       and commit it if it changed.

    Requires polib (https://pypi.python.org/pypi/polib) and flake8.
    """
    print('Running unit tests.')
    subprocess.check_output(["python", "example_project/manage.py", "test", "photologue"])

    print('Running flake8 check.')
    # flake8 reads its configuration from setup.cfg.
    subprocess.check_output(["flake8"])

    print('Checking that we have no outstanding DB migrations.')
    dry_run = subprocess.check_output(["python", "example_project/manage.py", "makemigrations",
                                       "--dry-run", "photologue"])
    if dry_run != b"No changes detected in app 'photologue'\n":
        raise Exception('There are outstanding migrations for Photologue.')

    print('Updating CONTRIBUTORS.txt')
    # git log emits one (quoted) author name per commit.
    log = subprocess.check_output(["git", "log", "--format='%aN'"])
    authors = [line.strip("'") for line in log.decode('utf-8').split('\n')]

    # Add the last translator recorded in each .po file.  The po headers only
    # keep the most recent translator, so this part is incomplete by nature.
    for language in os.listdir('photologue/locale/'):
        po_path = 'photologue/locale/{}/LC_MESSAGES/django.po'.format(language)
        last = polib.pofile(po_path).metadata['Last-Translator']
        authors.append(last[:last.find('<') - 1])

    # Tally contributions per author so the output can be ordered by effort.
    counts = {}
    for name in authors:
        if name in ('', '(no author)', 'FULL NAME'):
            # Skip junk entries.
            continue
        # The project creator is hard-coded at the top of the file instead of
        # being counted here.
        if name in ('Justin Driscoll', 'justin.driscoll'):
            continue
        if name == 'richardbarran':
            # Same contributor appearing under two identities.
            name = 'Richard Barran'
        counts[name] = counts.get(name, 0) + 1

    with codecs.open('CONTRIBUTORS.txt', 'w', encoding='utf8') as outfile:
        outfile.write('Photologue is made possible by all the people who have contributed'
                      ' to it. A non-exhaustive list follows:\n\n')
        outfile.write('Justin Driscoll\n')
        for name in sorted(counts, key=counts.get, reverse=True):
            outfile.write(name + '\n')

    # Commit only if the regenerated file actually differs.
    if subprocess.check_output(["git", "diff", "CONTRIBUTORS.txt"]):
        subprocess.check_output(["git", "commit", "-m", "Updated the list of contributors.",
                                 "CONTRIBUTORS.txt"])
| 38.77907 | 113 | 0.653073 | import os
import subprocess
try:
import polib
except ImportError:
print('Msg to the package releaser: prerelease hooks will not work as you have not installed polib.')
raise
import copy
import codecs
def prereleaser_before(data):
print('Running unit tests.')
subprocess.check_output(["python", "example_project/manage.py", "test", "photologue"])
print('Running flake8 check.')
subprocess.check_output(["flake8"])
print('Checking that we have no outstanding DB migrations.')
output = subprocess.check_output(["python", "example_project/manage.py", "makemigrations", "--dry-run",
"photologue"])
if not output == b"No changes detected in app 'photologue'\n":
raise Exception('There are outstanding migrations for Photologue.')
print('Updating CONTRIBUTORS.txt')
output = subprocess.check_output(["git", "log", "--format='%aN'"])
contributors_list = [contributor.strip("'") for contributor in output.decode('utf-8').split('\n')]
# Now add info from the translator files. This is incomplete, we can only list
# the 'last contributor' to each translation.
for language in os.listdir('photologue/locale/'):
filename = 'photologue/locale/{}/LC_MESSAGES/django.po'.format(language)
po = polib.pofile(filename)
last_translator = po.metadata['Last-Translator']
contributors_list.append(last_translator[:last_translator.find('<') - 1])
# Now we want to only show each contributor once, and to list them by how many
# contributions they have made - a rough guide to the effort they have put in.
contributors_dict = {}
for author in contributors_list:
author_copy = copy.copy(author)
if author_copy in ('', '(no author)', 'FULL NAME'):
# Skip bad data.
continue
# The creator of this project should always appear first in the list - so
# don't add him to this list, but hard-code his name.
if author_copy in ('Justin Driscoll', 'justin.driscoll'):
continue
if author_copy == 'richardbarran':
author_copy = 'Richard Barran'
if author_copy in contributors_dict:
contributors_dict[author_copy] += 1
else:
contributors_dict[author_copy] = 1
with codecs.open('CONTRIBUTORS.txt', 'w', encoding='utf8') as f:
f.write('Photologue is made possible by all the people who have contributed'
' to it. A non-exhaustive list follows:\n\n')
f.write('Justin Driscoll\n')
for i in sorted(contributors_dict, key=contributors_dict.get, reverse=True):
f.write(i + '\n')
if subprocess.check_output(["git", "diff", "CONTRIBUTORS.txt"]):
subprocess.check_output(["git", "commit", "-m", "Updated the list of contributors.", "CONTRIBUTORS.txt"])
| true | true |
f71ef585eef7e5fc1bee36b16be5ea58999073c9 | 465 | py | Python | Python/RotateArrayTest.py | TonnyL/Windary | 39f85cdedaaf5b85f7ce842ecef975301fc974cf | [
"MIT"
] | 205 | 2017-11-16T08:38:46.000Z | 2022-03-06T05:50:03.000Z | Python/RotateArrayTest.py | santosh241/Windary | 39f85cdedaaf5b85f7ce842ecef975301fc974cf | [
"MIT"
] | 3 | 2018-04-10T10:17:52.000Z | 2020-12-11T08:00:09.000Z | Python/RotateArrayTest.py | santosh241/Windary | 39f85cdedaaf5b85f7ce842ecef975301fc974cf | [
"MIT"
] | 28 | 2018-04-10T06:42:42.000Z | 2021-09-14T14:15:39.000Z | from unittest import TestCase
from RotateArray import RotateArray
class TestRotateArray(TestCase):
def test_rotate(self):
ra = RotateArray()
array0 = [1]
ra.rotate(array0, 1)
self.assertEqual(array0, [1])
array1 = [1, 2]
ra.rotate(array1, 1)
self.assertEqual(array1, [2, 1])
array2 = [1, 2, 3, 4, 5, 6, 7]
ra.rotate(array2, 3)
self.assertEqual(array2, [5, 6, 7, 1, 2, 3, 4])
| 22.142857 | 55 | 0.565591 | from unittest import TestCase
from RotateArray import RotateArray
class TestRotateArray(TestCase):
def test_rotate(self):
ra = RotateArray()
array0 = [1]
ra.rotate(array0, 1)
self.assertEqual(array0, [1])
array1 = [1, 2]
ra.rotate(array1, 1)
self.assertEqual(array1, [2, 1])
array2 = [1, 2, 3, 4, 5, 6, 7]
ra.rotate(array2, 3)
self.assertEqual(array2, [5, 6, 7, 1, 2, 3, 4])
| true | true |
f71ef5f561ecfaddb53695c882f1a617b5a5d0a2 | 1,413 | py | Python | pyart/testing/sample_files.py | kmuehlbauer/pyart | 4accda3fc02490d135373ad5b054899c6781e762 | [
"OLDAP-2.6",
"Python-2.0"
] | null | null | null | pyart/testing/sample_files.py | kmuehlbauer/pyart | 4accda3fc02490d135373ad5b054899c6781e762 | [
"OLDAP-2.6",
"Python-2.0"
] | null | null | null | pyart/testing/sample_files.py | kmuehlbauer/pyart | 4accda3fc02490d135373ad5b054899c6781e762 | [
"OLDAP-2.6",
"Python-2.0"
] | null | null | null | """
pyart.testing.sample_files
==========================
Sample radar files in a number of formats. Many of these files
are incomplete, they should only be used for testing, not production.
.. autosummary::
:toctree: generated/
MDV_PPI_FILE
MDV_RHI_FILE
CFRADIAL_PPI_FILE
CFRADIAL_RHI_FILE
CHL_RHI_FILE
SIGMET_PPI_FILE
SIGMET_RHI_FILE
NEXRAD_ARCHIVE_FILE
NEXRAD_ARCHIVE_COMPRESSED_FILE
NEXRAD_CDM_FILE
INTERP_SOUNDE_FILE
"""
import os
# Absolute path of the directory holding the sample data files.
DATA_PATH = os.path.join(os.path.dirname(__file__), 'data')


def _sample(filename):
    """Return the full path of *filename* inside DATA_PATH."""
    return os.path.join(DATA_PATH, filename)


MDV_PPI_FILE = _sample('example_mdv_ppi.mdv')
MDV_RHI_FILE = _sample('example_mdv_rhi.mdv')
CFRADIAL_PPI_FILE = _sample('example_cfradial_ppi.nc')
CFRADIAL_RHI_FILE = _sample('example_cfradial_rhi.nc')
CHL_RHI_FILE = _sample('example_chl_rhi.chl')
SIGMET_PPI_FILE = _sample('example_sigmet_ppi.sigmet')
SIGMET_RHI_FILE = _sample('example_sigmet_rhi.sigmet')
NEXRAD_ARCHIVE_FILE = _sample('example_nexrad_archive.bz2')
NEXRAD_ARCHIVE_COMPRESSED_FILE = _sample('example_nexrad_archive_compressed.ar2v')
NEXRAD_CDM_FILE = _sample('example_nexrad_cdm.bz2')
INTERP_SOUNDE_FILE = _sample('example_interpolatedsonde.cdf')
# Leading underscore: used internally by the test suite, not public API.
_EXAMPLE_RAYS_FILE = _sample('example_rays.npz')
import os
DATA_PATH = os.path.join(os.path.dirname(__file__), 'data')
MDV_PPI_FILE = os.path.join(DATA_PATH, 'example_mdv_ppi.mdv')
MDV_RHI_FILE = os.path.join(DATA_PATH, 'example_mdv_rhi.mdv')
CFRADIAL_PPI_FILE = os.path.join(DATA_PATH, 'example_cfradial_ppi.nc')
CFRADIAL_RHI_FILE = os.path.join(DATA_PATH, 'example_cfradial_rhi.nc')
CHL_RHI_FILE = os.path.join(DATA_PATH, 'example_chl_rhi.chl')
SIGMET_PPI_FILE = os.path.join(DATA_PATH, 'example_sigmet_ppi.sigmet')
SIGMET_RHI_FILE = os.path.join(DATA_PATH, 'example_sigmet_rhi.sigmet')
NEXRAD_ARCHIVE_FILE = os.path.join(DATA_PATH, 'example_nexrad_archive.bz2')
NEXRAD_ARCHIVE_COMPRESSED_FILE = os.path.join(
DATA_PATH, 'example_nexrad_archive_compressed.ar2v')
NEXRAD_CDM_FILE = os.path.join(DATA_PATH, 'example_nexrad_cdm.bz2')
INTERP_SOUNDE_FILE = os.path.join(DATA_PATH, 'example_interpolatedsonde.cdf')
_EXAMPLE_RAYS_FILE = os.path.join(DATA_PATH, 'example_rays.npz')
| true | true |
f71ef602df7023b9ceea5044b4fb1a54d0c1617e | 4,760 | py | Python | tests/problems/np/test_graphpartitioning.py | panaali/qubovert | d5ea46349d2a058954fb2cb06f559c0d3fb382c5 | [
"Apache-2.0"
] | 15 | 2020-07-10T20:46:50.000Z | 2021-12-29T05:01:55.000Z | tests/problems/np/test_graphpartitioning.py | panaali/qubovert | d5ea46349d2a058954fb2cb06f559c0d3fb382c5 | [
"Apache-2.0"
] | 13 | 2020-06-12T22:37:59.000Z | 2022-02-22T20:11:13.000Z | tests/problems/np/test_graphpartitioning.py | panaali/qubovert | d5ea46349d2a058954fb2cb06f559c0d3fb382c5 | [
"Apache-2.0"
] | 4 | 2020-05-13T06:02:38.000Z | 2022-03-22T20:45:23.000Z | # Copyright 2020 Joseph T. Iosue
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains tests for the GraphPartitioning class.
"""
from qubovert.problems import GraphPartitioning
from qubovert.utils import (
solve_qubo_bruteforce, solve_quso_bruteforce,
solve_pubo_bruteforce, solve_puso_bruteforce
)
from numpy import allclose
# Unweighted test graph: six vertices with a single "bridge" edge (c, d)
# joining the {a, b, c} side to the {d, e, f} side.
edges = {("a", "b"), ("a", "c"), ("c", "d"),
         ("b", "c"), ("e", "f"), ("d", "e")}
problem = GraphPartitioning(edges)
# Both orientations of the optimal bipartition count as correct answers.
solutions = (
    ({"a", "b", "c"}, {"d", "e", "f"}),
    ({"d", "e", "f"}, {"a", "b", "c"})
)
# Weighted variant: edge (1, 2) carries weight 3, so the cheapest balanced
# cut keeps 1 and 2 on the same side (cut weight 1 via edge (0, 1)).
problem_weighted = GraphPartitioning({(0, 1): 1, (1, 2): 3, (0, 3): 1})
solutions_weighted = (
    ({0, 3}, {1, 2}),
    ({1, 2}, {0, 3})
)
def test_graphpartitioning_str():
    """repr round-trip: evaluating str(problem) rebuilds an equal problem."""
    rebuilt = eval(str(problem))
    assert rebuilt == problem


def test_graphpartitioning_properties():
    """The edge set is preserved and the other accessors do not raise."""
    assert problem.E == edges
    # Merely touching these properties checks that they are computable.
    problem.V
    problem.degree
    problem.weights


def test_graphpartitioning_bruteforce():
    """Brute force finds an optimal partition (in either orientation)."""
    assert problem.solve_bruteforce() in solutions
    all_sols = problem.solve_bruteforce(all_solutions=True)
    assert all_sols in (list(solutions), list(reversed(solutions)))
def _assert_optimal(prob, energy, raw_solution, valid_solutions):
    """Shared checks for a brute-forced model of ``prob``.

    Converts ``raw_solution`` back to a partition, asserts that it is one of
    ``valid_solutions``, that both the converted and the raw form pass
    validity checking, and that the optimal energy is 1 (the known optimum
    for both test problems).  Returns the converted solution.
    """
    solution = prob.convert_solution(raw_solution)
    assert solution in valid_solutions
    assert prob.is_solution_valid(solution)
    assert prob.is_solution_valid(raw_solution)
    assert allclose(energy, 1)
    return solution


# QUBO

def test_graphpartitioning_qubo_solve():
    """QUBO brute force finds the optimum for both test problems."""
    e, sol = solve_qubo_bruteforce(problem.to_qubo())
    _assert_optimal(problem, e, sol, solutions)

    # A larger penalty factor must not change the optimal solution.
    e, sol = solve_qubo_bruteforce(problem.to_qubo(10))
    _assert_optimal(problem, e, sol, solutions)

    e, sol = solve_qubo_bruteforce(problem_weighted.to_qubo())
    solution = _assert_optimal(problem_weighted, e, sol, solutions_weighted)
    # convert_solution accepts the raw solution as a mapping or as a list.
    assert solution == problem_weighted.convert_solution(
        [sol[i] for i in range(problem_weighted.num_binary_variables)]
    )


def test_graphpartitioning_qubo_numvars():
    """The QUBO's variable count matches the problem's."""
    Q = problem.to_qubo()
    assert (
        len(set(y for x in Q for y in x)) ==
        problem.num_binary_variables ==
        Q.num_binary_variables
    )


# quso

def test_graphpartitioning_quso_solve():
    """QUSO brute force finds the optimum for both test problems."""
    e, sol = solve_quso_bruteforce(problem.to_quso())
    _assert_optimal(problem, e, sol, solutions)

    e, sol = solve_quso_bruteforce(problem_weighted.to_quso())
    _assert_optimal(problem_weighted, e, sol, solutions_weighted)


def test_graphpartitioning_quso_numvars():
    """The QUSO's variable count matches the problem's."""
    L = problem.to_quso()
    assert L.num_binary_variables == problem.num_binary_variables


# PUBO

def test_graphpartitioning_pubo_solve():
    """PUBO brute force finds the optimum for both test problems."""
    e, sol = solve_pubo_bruteforce(problem.to_pubo())
    _assert_optimal(problem, e, sol, solutions)

    e, sol = solve_pubo_bruteforce(problem_weighted.to_pubo())
    _assert_optimal(problem_weighted, e, sol, solutions_weighted)


# puso

def test_graphpartitioning_puso_solve():
    """PUSO brute force finds the optimum for both test problems."""
    e, sol = solve_puso_bruteforce(problem.to_puso())
    _assert_optimal(problem, e, sol, solutions)

    e, sol = solve_puso_bruteforce(problem_weighted.to_puso())
    _assert_optimal(problem_weighted, e, sol, solutions_weighted)
from qubovert.problems import GraphPartitioning
from qubovert.utils import (
solve_qubo_bruteforce, solve_quso_bruteforce,
solve_pubo_bruteforce, solve_puso_bruteforce
)
from numpy import allclose
edges = {("a", "b"), ("a", "c"), ("c", "d"),
("b", "c"), ("e", "f"), ("d", "e")}
problem = GraphPartitioning(edges)
solutions = (
({"a", "b", "c"}, {"d", "e", "f"}),
({"d", "e", "f"}, {"a", "b", "c"})
)
problem_weighted = GraphPartitioning({(0, 1): 1, (1, 2): 3, (0, 3): 1})
solutions_weighted = (
({0, 3}, {1, 2}),
({1, 2}, {0, 3})
)
def test_graphpartitioning_str():
assert eval(str(problem)) == problem
def test_graphpartitioning_properties():
assert problem.E == edges
problem.V
problem.degree
problem.weights
def test_graphpartitioning_bruteforce():
assert problem.solve_bruteforce() in solutions
assert (
problem.solve_bruteforce(all_solutions=True) in
(list(solutions), list(reversed(solutions)))
)
def test_graphpartitioning_qubo_solve():
e, sol = solve_qubo_bruteforce(problem.to_qubo())
solution = problem.convert_solution(sol)
assert solution in solutions
assert problem.is_solution_valid(solution)
assert problem.is_solution_valid(sol)
assert allclose(e, 1)
e, sol = solve_qubo_bruteforce(problem.to_qubo(10))
solution = problem.convert_solution(sol)
assert solution in solutions
assert problem.is_solution_valid(solution)
assert problem.is_solution_valid(sol)
assert allclose(e, 1)
e, sol = solve_qubo_bruteforce(problem_weighted.to_qubo())
solution = problem_weighted.convert_solution(sol)
assert solution == problem_weighted.convert_solution(
[sol[i] for i in range(problem_weighted.num_binary_variables)]
)
assert solution in solutions_weighted
assert problem_weighted.is_solution_valid(solution)
assert problem_weighted.is_solution_valid(sol)
assert allclose(e, 1)
def test_graphpartitioning_qubo_numvars():
Q = problem.to_qubo()
assert (
len(set(y for x in Q for y in x)) ==
problem.num_binary_variables ==
Q.num_binary_variables
)
def test_graphpartitioning_quso_solve():
e, sol = solve_quso_bruteforce(problem.to_quso())
solution = problem.convert_solution(sol)
assert solution in solutions
assert problem.is_solution_valid(solution)
assert problem.is_solution_valid(sol)
assert allclose(e, 1)
e, sol = solve_quso_bruteforce(problem_weighted.to_quso())
solution = problem_weighted.convert_solution(sol)
assert solution in solutions_weighted
assert problem_weighted.is_solution_valid(solution)
assert problem_weighted.is_solution_valid(sol)
assert allclose(e, 1)
def test_graphpartitioning_quso_numvars():
L = problem.to_quso()
assert L.num_binary_variables == problem.num_binary_variables
def test_graphpartitioning_pubo_solve():
e, sol = solve_pubo_bruteforce(problem.to_pubo())
solution = problem.convert_solution(sol)
assert solution in solutions
assert problem.is_solution_valid(solution)
assert problem.is_solution_valid(sol)
assert allclose(e, 1)
e, sol = solve_pubo_bruteforce(problem_weighted.to_pubo())
solution = problem_weighted.convert_solution(sol)
assert solution in solutions_weighted
assert problem_weighted.is_solution_valid(solution)
assert problem_weighted.is_solution_valid(sol)
assert allclose(e, 1)
def test_graphpartitioning_puso_solve():
e, sol = solve_puso_bruteforce(problem.to_puso())
solution = problem.convert_solution(sol)
assert solution in solutions
assert problem.is_solution_valid(solution)
assert problem.is_solution_valid(sol)
assert allclose(e, 1)
e, sol = solve_puso_bruteforce(problem_weighted.to_puso())
solution = problem_weighted.convert_solution(sol)
assert solution in solutions_weighted
assert problem_weighted.is_solution_valid(solution)
assert problem_weighted.is_solution_valid(sol)
assert allclose(e, 1)
| true | true |
f71ef6adf9445440b9d5b44b2d38774311b99add | 2,408 | py | Python | geocode/latlons2llsoa.py | JackKelly/Geocode | b3cc89c7467384e41c5be6bcd80b36271cfc252c | [
"MIT"
] | 8 | 2020-06-03T14:49:10.000Z | 2021-05-23T19:50:23.000Z | geocode/latlons2llsoa.py | JackKelly/Geocode | b3cc89c7467384e41c5be6bcd80b36271cfc252c | [
"MIT"
] | 7 | 2021-05-21T11:19:13.000Z | 2022-03-04T13:20:11.000Z | geocode/latlons2llsoa.py | JackKelly/Geocode | b3cc89c7467384e41c5be6bcd80b36271cfc252c | [
"MIT"
] | 2 | 2021-05-21T11:00:32.000Z | 2021-06-15T13:58:55.000Z | #!/usr/bin/env python3
"""
Load a list of lat/lons from a CSV file and reverse-geocode them to LLSOA.
- Jamie Taylor <jamie.taylor@sheffield.ac.uk>
- First Authored: 2020-04-16
"""
import sys
import os
import argparse
import time as TIME
import pandas as pd
from geocode import Geocoder, query_yes_no
def parse_options():
    """Parse the command line options and validate the input/output paths."""
    parser = argparse.ArgumentParser(
        description=("This is a command line interface (CLI) for "
                     "the latlons2llsoa.py module"),
        epilog="Jamie Taylor, 2020-04-16"
    )
    parser.add_argument(
        "-f", "--input-file", dest="infile", action="store", type=str,
        required=True, metavar="</path/to/file>",
        help="Specify a CSV file containing a list of latitudes and longitudes to "
             "be reverse-geocoded. The file must contain the columns 'latitude' "
             "and 'longitude' (it can contain others, all of which will be kept)."
    )
    parser.add_argument(
        "-o", "--output-file", dest="outfile", action="store", type=str,
        required=True, metavar="</path/to/file>", help="Specify an output file."
    )
    parser.add_argument(
        "--datazones", dest="datazones", action="store_true",
        required=False, help="Specify to use Data Zones in Scotland."
    )
    options = parser.parse_args()
    # Fail fast on a missing input file.
    if not os.path.isfile(options.infile):
        raise Exception(f"The input file '{options.infile}' does not exist.")
    # Ask before clobbering an existing output file.
    if os.path.isfile(options.outfile):
        overwrite = query_yes_no(
            f"The outfile '{options.outfile}' already exists, are you sure you "
            "wish to overwrite?", "no"
        )
        if not overwrite:
            print("Quitting...")
            sys.exit(0)
    return options
def main():
    """Reverse-geocode the input CSV's lat/lons to LLSOAs and save the result."""
    start_time = TIME.time()
    options = parse_options()
    # Load the input CSV (validated to exist by parse_options; must contain
    # 'latitude' and 'longitude' columns).
    with open(options.infile, "r") as csv_file:
        frame = pd.read_csv(csv_file)
    # Geocoder supports the context-manager protocol — presumably it manages
    # its lookup data; see the geocode package for details.
    with Geocoder(progress_bar=True) as geocoder:
        coords = frame[["latitude", "longitude"]].to_numpy()
        frame["llsoacd"] = geocoder.reverse_geocode_llsoa(coords, options.datazones)
    frame.to_csv(options.outfile, index=False)
    print(f"Finished, time taken: {TIME.time() - start_time} seconds")


if __name__ == "__main__":
    main()
| 43 | 99 | 0.5951 |
import sys
import os
import argparse
import time as TIME
import pandas as pd
from geocode import Geocoder, query_yes_no
def parse_options():
parser = argparse.ArgumentParser(description=("This is a command line interface (CLI) for "
"the latlons2llsoa.py module"),
epilog="Jamie Taylor, 2020-04-16")
parser.add_argument("-f", "--input-file", dest="infile", action="store", type=str,
required=True, metavar="</path/to/file>",
help="Specify a CSV file containing a list of latitudes and longitudes to "
"be reverse-geocoded. The file must contain the columns 'latitude' "
"and 'longitude' (it can contain others, all of which will be kept).")
parser.add_argument("-o", "--output-file", dest="outfile", action="store", type=str,
required=True, metavar="</path/to/file>", help="Specify an output file.")
parser.add_argument("--datazones", dest="datazones", action="store_true",
required=False, help="Specify to use Data Zones in Scotland.")
options = parser.parse_args()
if not os.path.isfile(options.infile):
raise Exception(f"The input file '{options.infile}' does not exist.")
if os.path.isfile(options.outfile):
check = query_yes_no(f"The outfile '{options.outfile}' already exists, are you sure you "
"wish to overwrite?", "no")
if not check:
print("Quitting...")
sys.exit(0)
return options
def main():
timerstart = TIME.time()
options = parse_options()
with open(options.infile, "r") as fid:
df = pd.read_csv(fid)
with Geocoder(progress_bar=True) as geo:
df["llsoacd"] = geo.reverse_geocode_llsoa(df[["latitude", "longitude"]].to_numpy(),
options.datazones)
df.to_csv(options.outfile, index=False)
print(f"Finished, time taken: {TIME.time() - timerstart} seconds")
if __name__ == "__main__":
main()
| true | true |
f71ef86436cc76a69e74c61126c0c795ea593197 | 1,017 | py | Python | Aulas Python/Aula016.py | RenanRibeiroDaSilva/Meu-Aprendizado-Python | 280bf2ad132ae0d26255e70b894fa7dbb69a5d01 | [
"MIT"
] | 2 | 2021-05-21T23:17:44.000Z | 2021-05-22T04:34:37.000Z | Aulas Python/Aula016.py | RenanRibeiroDaSilva/Meu-Aprendizado-Python | 280bf2ad132ae0d26255e70b894fa7dbb69a5d01 | [
"MIT"
] | null | null | null | Aulas Python/Aula016.py | RenanRibeiroDaSilva/Meu-Aprendizado-Python | 280bf2ad132ae0d26255e70b894fa7dbb69a5d01 | [
"MIT"
] | null | null | null | """ Aula - 016 - Tuplas"""
# Teoria:
# Variáveis compostas:
'''
Tuplas, o que são?:
Em uma explicação bem simples, é uma variável que guarda vários valores!
As tuplas usam indexições para indentificar seu componentes internos!
As tuplas são imultáveis!
'''
# Parte prática:
# A tuple holding the snack menu.  Tuples are immutable sequences.
lanche = ('Hamburguer', 'Suco', 'Pizza', 'Pudim', 'Batata Frita')
# Individual items are read with zero-based indexing.
print(lanche[0])
print(lanche[1])
print(lanche[2])
print(lanche[3], '\n')
# Iterating the tuple directly yields each value in order.
for comida in lanche:
    print(f'Eu vou comer {comida}')
print('Comi pra caramba\n')
# Index-based iteration with range(len(...)) also exposes the position.
for cont in range(0, len(lanche)):
    print(f'Eu vou comer {lanche[cont]} na posição {cont}')
print('Comi muito!')
# enumerate() is the idiomatic way to get (index, value) pairs.
for pos, comida in enumerate(lanche):
    print(f'Eu vou comer {comida} na posição {pos}')
print('Estou farto!')
print(sorted(lanche))  # ------> sorted() returns a new, ordered list.
# Tuples support concatenation with +, producing a new tuple.
a = (2, 5, 4)
b = (5, 8, 1, 2)
c = a + b
print(c)
print(c.count(2))  # count() -> number of times the value 2 appears in c.
del (lanche)  # A tuple is immutable, but we can delete the whole object. Trying to delete
# just one index would raise an error.
lanche = ('Hamburguer', 'Suco', 'Pizza', 'Pudim', 'Batata Frita')
print(lanche[0])
print(lanche[1])
print(lanche[2])
print(lanche[3], '\n')
for comida in lanche:
print(f'Eu vou comer {comida}')
print('Comi pra caramba\n')
for cont in range(0, len(lanche)):
print(f'Eu vou comer {lanche[cont]} na posição {cont}')
print('Comi muito!')
for pos, comida in enumerate(lanche):
print(f'Eu vou comer {comida} na posição {pos}')
print('Estou farto!')
print(sorted(lanche))
a = (2, 5, 4)
b = (5, 8, 1, 2)
c = a + b
print(c)
print(c.count(2))
del (lanche)
| true | true |
f71ef89a094d1be41d5d328cb9ace8e241cf546b | 2,543 | py | Python | examples/acoustics_2d_radial/1drad/setplot.py | navravi/amrclaw | 727d98d243c521267c927f6fe107ba6f1155597b | [
"BSD-3-Clause"
] | 16 | 2015-05-27T08:16:09.000Z | 2022-01-21T06:36:24.000Z | examples/acoustics_2d_radial/1drad/setplot.py | navravi/amrclaw | 727d98d243c521267c927f6fe107ba6f1155597b | [
"BSD-3-Clause"
] | 107 | 2015-01-02T19:51:43.000Z | 2021-11-24T03:35:32.000Z | examples/acoustics_2d_radial/1drad/setplot.py | BrisaDavis/amrclaw | c5cacdf00f1959e160ea5616cdf6ea7b6cd374f3 | [
"BSD-3-Clause"
] | 28 | 2015-01-10T00:03:56.000Z | 2022-02-11T23:52:34.000Z |
"""
Set up the plot figures, axes, and items to be done for each frame.
This module is imported by the plotting routines and then the
function setplot is called to set the plot parameters.
"""
#--------------------------
def setplot(plotdata=None):
#--------------------------
"""
Specify what is to be plotted at each frame.
Input: plotdata, an instance of pyclaw.plotters.data.ClawPlotData.
Output: a modified version of plotdata.
"""
if plotdata is None:
from clawpack.visclaw.data import ClawPlotData
plotdata = ClawPlotData()
plotdata.clearfigures() # clear any old figures,axes,items data
# Figure for q[0]
plotfigure = plotdata.new_plotfigure(name='q[0]', figno=0)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = [0,1.5]
plotaxes.ylimits = [-2.,4.]
plotaxes.title = 'q[0]'
# Set up for item on these axes:
plotitem = plotaxes.new_plotitem(plot_type='1d')
plotitem.plot_var = 0
plotitem.plotstyle = '-'
plotitem.color = 'b'
plotitem.show = True # show on plot?
# Figure for q[1]
plotfigure = plotdata.new_plotfigure(name='q[1]', figno=1)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = 'auto'
plotaxes.ylimits = 'auto'
plotaxes.title = 'q[1]'
# Set up for item on these axes:
plotitem = plotaxes.new_plotitem(plot_type='1d')
plotitem.plot_var = 1
plotitem.plotstyle = '-'
plotitem.color = 'b'
plotitem.show = True # show on plot?
# Parameters used only when creating html and/or latex hardcopy
# e.g., via pyclaw.plotters.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html' # pointer for top of index
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
plotdata.parallel = True # make multiple frame png's at once
return plotdata
| 31.012195 | 80 | 0.616201 |
def setplot(plotdata=None):
if plotdata is None:
from clawpack.visclaw.data import ClawPlotData
plotdata = ClawPlotData()
plotdata.clearfigures()
plotfigure = plotdata.new_plotfigure(name='q[0]', figno=0)
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = [0,1.5]
plotaxes.ylimits = [-2.,4.]
plotaxes.title = 'q[0]'
plotitem = plotaxes.new_plotitem(plot_type='1d')
plotitem.plot_var = 0
plotitem.plotstyle = '-'
plotitem.color = 'b'
plotitem.show = True
plotfigure = plotdata.new_plotfigure(name='q[1]', figno=1)
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = 'auto'
plotaxes.ylimits = 'auto'
plotaxes.title = 'q[1]'
plotitem = plotaxes.new_plotitem(plot_type='1d')
plotitem.plot_var = 1
plotitem.plotstyle = '-'
plotitem.color = 'b'
plotitem.show = True
plotdata.printfigs = True
plotdata.print_format = 'png'
plotdata.print_framenos = 'all'
plotdata.print_fignos = 'all'
plotdata.html = True
plotdata.html_homelink = '../README.html'
plotdata.latex = True
plotdata.latex_figsperline = 2
plotdata.latex_framesperline = 1
plotdata.latex_makepdf = False
plotdata.parallel = True
return plotdata
| true | true |
f71ef8c3544ef60ba5a8d26863bc684e75fe233f | 750 | py | Python | cv2ools/cli.py | narumiruna/opencv-tools | 444a2ec31e01d6dc023ad40a7187f493c91e7ce2 | [
"MIT"
] | 1 | 2019-12-20T08:33:09.000Z | 2019-12-20T08:33:09.000Z | cv2ools/cli.py | narumiruna/opencv-tools | 444a2ec31e01d6dc023ad40a7187f493c91e7ce2 | [
"MIT"
] | 1 | 2020-04-22T02:43:22.000Z | 2020-06-08T04:14:52.000Z | cv2ools/cli.py | narumiruna/opencv-tools | 444a2ec31e01d6dc023ad40a7187f493c91e7ce2 | [
"MIT"
] | null | null | null | from pathlib import Path
import click
import cv2
from .core import VideoWriter
@click.group()
def cli():
    # Root command group; subcommands register themselves via @cli.command().
    pass
@cli.command()
@click.argument('image-dir')
@click.option('-o', '--output', default=None)
@click.option('--fps', default=60)
def merge(image_dir, output, fps):
    r"""Create video from images"""
    # Recognized image extensions; matching is done on the lowercased
    # suffix so ".JPG"/".PNG" files are also picked up (generalization).
    image_extensions = ['.jpg', '.jpeg', '.png']
    image_dir = Path(image_dir)
    if output is None:
        # Default output: "<image-dir>.mp4" next to the directory.
        output = image_dir.with_suffix('.mp4')
    # Sort for a deterministic frame order.
    paths = [path for path in sorted(image_dir.iterdir()) if path.suffix.lower() in image_extensions]
    with VideoWriter(output, fps) as writer:
        for path in paths:
            image = cv2.imread(str(path))
            if image is None:
                # BUG FIX: cv2.imread returns None (it does not raise) for
                # unreadable files; previously that None reached writer.write.
                click.echo('skipping unreadable image: {}'.format(path))
                continue
            writer.write(image)


if __name__ == '__main__':
    cli()
| 20.27027 | 93 | 0.642667 | from pathlib import Path
import click
import cv2
from .core import VideoWriter
@click.group()
def cli():
    # Root command group; subcommands register themselves via @cli.command().
    pass
@cli.command()
@click.argument('image-dir')
@click.option('-o', '--output', default=None)
@click.option('--fps', default=60)
def merge(image_dir, output, fps):
    # Build a video from the images found directly inside IMAGE-DIR.
    # Only these suffixes are considered; the match is case-sensitive,
    # so e.g. ".JPG" files are skipped.
    image_extensions = ['.jpg', '.jpeg', '.png']
    image_dir = Path(image_dir)
    if output is None:
        # Default output: "<image-dir>.mp4" next to the directory.
        output = image_dir.with_suffix('.mp4')
    # Sort for a deterministic frame order.
    paths = [path for path in sorted(image_dir.iterdir()) if path.suffix in image_extensions]
    with VideoWriter(output, fps) as writer:
        for path in paths:
            # NOTE(review): cv2.imread returns None for unreadable files and
            # that None is passed straight to writer.write — confirm handling.
            image = cv2.imread(str(path))
            writer.write(image)
if __name__ == '__main__':
    cli()
| true | true |
f71ef8c9bc4b3379b8be5bf1c34f17b64744b949 | 1,716 | py | Python | plugins/plugin_hevc/hevc_codec.py | G-AshwinKumar/experiment-notebook | aae1c5fb9ef8f84dce5d75989ed8975797282f37 | [
"MIT"
] | null | null | null | plugins/plugin_hevc/hevc_codec.py | G-AshwinKumar/experiment-notebook | aae1c5fb9ef8f84dce5d75989ed8975797282f37 | [
"MIT"
] | null | null | null | plugins/plugin_hevc/hevc_codec.py | G-AshwinKumar/experiment-notebook | aae1c5fb9ef8f84dce5d75989ed8975797282f37 | [
"MIT"
] | null | null | null | import os
from enb import icompression
from enb.config import get_options
options = get_options()
class HEVC(icompression.WrapperCodec, icompression.LosslessCodec):
    """Lossless HEVC codec wrapping the HM TAppEncoder/TAppDecoder binaries."""

    def __init__(self, config_path=None, chroma_format="400"):
        # Default config sits next to this module: hevc_lossless_<cf>.cfg.
        config_path = config_path if config_path is not None \
            else os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              f"hevc_lossless_{chroma_format}.cfg")
        chroma_format = str(chroma_format)
        # Only monochrome (4:0:0) is supported by the shipped config.
        assert chroma_format in ["400"], f"Chroma format {chroma_format} not supported."
        icompression.WrapperCodec.__init__(
            self,
            compressor_path=os.path.join(os.path.dirname(os.path.abspath(__file__)), "TAppEncoderStatic"),
            decompressor_path=os.path.join(os.path.dirname(os.path.abspath(__file__)), "TAppDecoderStatic"),
            param_dict=dict(chroma_format=chroma_format))
        self.config_path = config_path

    def get_compression_params(self, original_path, compressed_path, original_file_info):
        # Command line for TAppEncoderStatic; geometry/bit depth come from
        # the file-info dict (width, height, component_count, bytes_per_sample).
        return f"-i {original_path} -c {self.config_path} -b {compressed_path} -wdt {original_file_info['width']} " \
               f"-hgt {original_file_info['height']} -f {original_file_info['component_count']} " \
               f"-cf {self.param_dict['chroma_format']} --InputChromaFormat={self.param_dict['chroma_format']} " \
               f"--InputBitDepth={8 * original_file_info['bytes_per_sample']}"

    def get_decompression_params(self, compressed_path, reconstructed_path, original_file_info):
        # Command line for TAppDecoderStatic; -d restores the original bit depth.
        return f"-b {compressed_path} -o {reconstructed_path} -d {8 * original_file_info['bytes_per_sample']}"

    @property
    def label(self):
        # Human-readable codec name used in reports.
        return "HEVC"
| 46.378378 | 117 | 0.689977 | import os
from enb import icompression
from enb.config import get_options
options = get_options()
class HEVC(icompression.WrapperCodec, icompression.LosslessCodec):
    """Lossless HEVC codec wrapping the HM TAppEncoder/TAppDecoder binaries."""

    def __init__(self, config_path=None, chroma_format="400"):
        # All bundled resources live next to this module.
        plugin_dir = os.path.dirname(os.path.abspath(__file__))
        if config_path is None:
            config_path = os.path.join(plugin_dir, f"hevc_lossless_{chroma_format}.cfg")
        chroma_format = str(chroma_format)
        # Only monochrome (4:0:0) is supported by the shipped config.
        assert chroma_format in ["400"], f"Chroma format {chroma_format} not supported."
        icompression.WrapperCodec.__init__(
            self,
            compressor_path=os.path.join(plugin_dir, "TAppEncoderStatic"),
            decompressor_path=os.path.join(plugin_dir, "TAppDecoderStatic"),
            param_dict=dict(chroma_format=chroma_format))
        self.config_path = config_path

    def get_compression_params(self, original_path, compressed_path, original_file_info):
        """Build the TAppEncoderStatic argument string."""
        info = original_file_info
        cf = self.param_dict['chroma_format']
        tokens = [
            f"-i {original_path}",
            f"-c {self.config_path}",
            f"-b {compressed_path}",
            f"-wdt {info['width']}",
            f"-hgt {info['height']}",
            f"-f {info['component_count']}",
            f"-cf {cf}",
            f"--InputChromaFormat={cf}",
            f"--InputBitDepth={8 * info['bytes_per_sample']}",
        ]
        return " ".join(tokens)

    def get_decompression_params(self, compressed_path, reconstructed_path, original_file_info):
        """Build the TAppDecoderStatic argument string."""
        bit_depth = 8 * original_file_info['bytes_per_sample']
        return f"-b {compressed_path} -o {reconstructed_path} -d {bit_depth}"

    @property
    def label(self):
        """Human-readable codec name."""
        return "HEVC"
| true | true |
f71ef8f8f357ec451ae6259a9f7e22daf2e0c8ec | 221 | py | Python | spec/fixtures/one.py | elarivie/atom-python-linter | 54190a2817ada801849bee4cffca95bd75123573 | [
"MIT"
] | 5 | 2020-01-16T11:21:29.000Z | 2021-05-23T03:32:49.000Z | spec/fixtures/one.py | elarivie/atom-python-linter | 54190a2817ada801849bee4cffca95bd75123573 | [
"MIT"
] | 8 | 2020-01-16T11:44:26.000Z | 2020-11-25T01:58:25.000Z | spec/fixtures/one.py | elarivie/atom-python-linter | 54190a2817ada801849bee4cffca95bd75123573 | [
"MIT"
] | 2 | 2021-01-19T08:30:09.000Z | 2021-04-19T07:50:26.000Z | """This file contains by default (without config file) ONE lint message.
for EVERY linter:
- flake8
- mypy
- pydocstyle
- pylint
"""
# NOTE: this file is a lint-test fixture — every construct below is an
# intentional violation (one per linter); do not "fix" them.
X = lambda: 1  # flake8 E731: assignment of a lambda (intentional)
def one() -> int:
    """One.
    """
    return one  # mypy: returns the function object, not an int (intentional)
    return 1  # pylint: unreachable statement (intentional)
| 12.277778 | 72 | 0.597285 | X = lambda: 1
def one() -> int:
    # Intentional lint fixture: returns the function object (type error)
    # and carries an unreachable second return.
    return one
    return 1
| true | true |
f71ef9c89f7f1d80f649b6d8f7720bf60c9f6349 | 9,850 | py | Python | kornia/enhance/histogram.py | Ishticode/kornia | 974abb43ec72d12dbd244a2fb247bbbab8498de0 | [
"ECL-2.0",
"Apache-2.0"
] | 418 | 2018-10-02T22:31:36.000Z | 2019-01-16T14:15:45.000Z | kornia/enhance/histogram.py | Ishticode/kornia | 974abb43ec72d12dbd244a2fb247bbbab8498de0 | [
"ECL-2.0",
"Apache-2.0"
] | 94 | 2019-01-17T22:10:45.000Z | 2019-05-22T23:47:58.000Z | kornia/enhance/histogram.py | Ishticode/kornia | 974abb43ec72d12dbd244a2fb247bbbab8498de0 | [
"ECL-2.0",
"Apache-2.0"
] | 25 | 2018-10-02T22:50:04.000Z | 2019-01-13T18:14:11.000Z | from typing import Optional, Tuple
import torch
def marginal_pdf(
    values: torch.Tensor, bins: torch.Tensor, sigma: torch.Tensor, epsilon: float = 1e-10
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Kernel-density estimate of the marginal PDF of ``values`` over ``bins``.

    Args:
        values: samples of shape [B, N, 1].
        bins: bin centers of shape [NUM_BINS].
        sigma: scalar tensor, Gaussian smoothing factor.
        epsilon: small constant for numerical stability.

    Returns:
        The pdf of shape [B, NUM_BINS] and the raw Gaussian kernel values
        of shape [B, N, NUM_BINS].
    """
    for name, tensor in (("values", values), ("bins", bins), ("sigma", sigma)):
        if not isinstance(tensor, torch.Tensor):
            raise TypeError(f"Input {name} type is not a torch.Tensor. Got {type(tensor)}")
    if values.dim() != 3:
        raise ValueError(f"Input values must be a of the shape BxNx1. Got {values.shape}")
    if bins.dim() != 1:
        raise ValueError(f"Input bins must be a of the shape NUM_BINS Got {bins.shape}")
    if sigma.dim() != 0:
        raise ValueError(f"Input sigma must be a of the shape 1 Got {sigma.shape}")

    # Distance of every sample to every bin center: [B, N, NUM_BINS].
    diffs = values - bins[None, None, :]
    # Gaussian kernel evaluated at each residual.
    kernel_values = torch.exp(-0.5 * torch.pow(diffs / sigma, 2))
    # Average over the sample axis, then normalize each batch row.
    pdf = kernel_values.mean(dim=1)
    norm = pdf.sum(dim=1, keepdim=True) + epsilon
    return pdf / norm, kernel_values
def joint_pdf(kernel_values1: torch.Tensor, kernel_values2: torch.Tensor, epsilon: float = 1e-10) -> torch.Tensor:
    """Joint PDF estimate from two sets of per-sample kernel values.

    Args:
        kernel_values1: shape [B, N, NUM_BINS].
        kernel_values2: shape [B, N, NUM_BINS] (same shape as the first).
        epsilon: small constant for numerical stability.

    Returns:
        Normalized joint pdf of shape [B, NUM_BINS, NUM_BINS].
    """
    pairs = (("kernel_values1", kernel_values1), ("kernel_values2", kernel_values2))
    for name, tensor in pairs:
        if not isinstance(tensor, torch.Tensor):
            raise TypeError(f"Input {name} type is not a torch.Tensor. Got {type(tensor)}")
    for name, tensor in pairs:
        if tensor.dim() != 3:
            raise ValueError(f"Input {name} must be a of the shape BxN. Got {tensor.shape}")
    if kernel_values1.shape != kernel_values2.shape:
        raise ValueError(
            "Inputs kernel_values1 and kernel_values2 must have the same shape."
            f" Got {kernel_values1.shape} and {kernel_values2.shape}"
        )

    # Outer product over the sample axis: [B, NUM_BINS, NUM_BINS].
    joint = kernel_values1.transpose(1, 2) @ kernel_values2
    # Normalize each batch element so the joint pdf sums to ~1.
    norm = joint.sum(dim=(1, 2)).view(-1, 1, 1) + epsilon
    return joint / norm
def histogram(x: torch.Tensor, bins: torch.Tensor, bandwidth: torch.Tensor, epsilon: float = 1e-10) -> torch.Tensor:
    """Kernel-density estimate of the histogram of ``x``.

    Args:
        x: input of shape (B, D).
        bins: bin centers of shape (N_bins,).
        bandwidth: scalar tensor, Gaussian smoothing factor.
        epsilon: small constant for numerical stability.

    Returns:
        Histogram of shape (B, N_bins).
    """
    # Delegate to the marginal KDE; the kernel values are not needed here.
    estimate, _ = marginal_pdf(x[:, :, None], bins, bandwidth, epsilon)
    return estimate
def histogram2d(
    x1: torch.Tensor, x2: torch.Tensor, bins: torch.Tensor, bandwidth: torch.Tensor, epsilon: float = 1e-10
) -> torch.Tensor:
    """Kernel-density estimate of the joint 2-d histogram of ``x1`` and ``x2``.

    Args:
        x1: input of shape (B, D1).
        x2: input of shape (B, D2).
        bins: bin centers of shape (N_bins,).
        bandwidth: scalar tensor, Gaussian smoothing factor.
        epsilon: small constant for numerical stability.

    Returns:
        Joint histogram of shape (B, N_bins, N_bins).
    """
    # Per-input kernel responses; only the kernel values are needed.
    kernels = [marginal_pdf(x[:, :, None], bins, bandwidth, epsilon)[1] for x in (x1, x2)]
    return joint_pdf(kernels[0], kernels[1])
def image_histogram2d(
    image: torch.Tensor,
    min: float = 0.0,
    max: float = 255.0,
    n_bins: int = 256,
    bandwidth: Optional[float] = None,
    centers: Optional[torch.Tensor] = None,
    return_pdf: bool = False,
    kernel: str = "triangular",
    eps: float = 1e-10,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Estimate the histogram of the input image(s) via kernel density estimation.

    Args:
        image: input of shape (H, W), (C, H, W) or (B, C, H, W).
        min: lower end of the value range (inclusive).
        max: upper end of the value range (inclusive); ignored when
            ``centers`` is given.
        n_bins: number of histogram bins; ignored when ``centers`` is given.
        bandwidth: smoothing factor; defaults to (max - min) / n_bins.
        centers: bin centers of shape (n_bins,); computed from the range
            when not given.
        return_pdf: also return per-bin probability densities.
        kernel: one of 'triangular', 'gaussian', 'uniform', 'epanechnikov'.
        eps: numerical-stability constant for the pdf normalization.

    Returns:
        Histogram of shape (bins), (C, bins) or (B, C, bins), and either the
        matching pdf (when return_pdf) or zeros of the same shape.

    Raises:
        TypeError: when an argument has the wrong type.
        ValueError: for a malformed ``centers`` or an unknown ``kernel``.
    """
    if image is not None and not isinstance(image, torch.Tensor):
        raise TypeError(f"Input image type is not a torch.Tensor. Got {type(image)}.")
    if centers is not None and not isinstance(centers, torch.Tensor):
        raise TypeError(f"Bins' centers type is not a torch.Tensor. Got {type(centers)}.")
    if centers is not None and len(centers.shape) > 0 and centers.dim() != 1:
        raise ValueError(f"Bins' centers must be a torch.Tensor of the shape (n_bins,). Got {centers.shape}.")
    if not isinstance(min, float):
        raise TypeError(f'Type of lower end of the range is not a float. Got {type(min)}.')
    if not isinstance(max, float):
        # BUG FIX: the message previously reported type(min) instead of type(max).
        raise TypeError(f"Type of upper end of the range is not a float. Got {type(max)}.")
    if not isinstance(n_bins, int):
        raise TypeError(f"Type of number of bins is not an int. Got {type(n_bins)}.")
    if bandwidth is not None and not isinstance(bandwidth, float):
        raise TypeError(f"Bandwidth type is not a float. Got {type(bandwidth)}.")
    if not isinstance(return_pdf, bool):
        raise TypeError(f"Return_pdf type is not a bool. Got {type(return_pdf)}.")

    if bandwidth is None:
        bandwidth = (max - min) / n_bins
    if centers is None:
        # Centers of equal-width bins covering [min, max].
        centers = min + bandwidth * (torch.arange(n_bins, device=image.device, dtype=image.dtype) + 0.5)
    # Shape (n_bins, 1, 1, 1, 1) so it broadcasts against any image rank.
    centers = centers.reshape(-1, 1, 1, 1, 1)

    # Normalized distance of every pixel to every bin center.
    u = torch.abs(image.unsqueeze(0) - centers) / bandwidth
    if kernel == "gaussian":
        kernel_values = torch.exp(-0.5 * u ** 2)
    elif kernel in ("triangular", "uniform", "epanechnikov",):
        # compute the mask and cast to floating point
        mask = (u <= 1).to(u.dtype)
        if kernel == "triangular":
            kernel_values = (1. - u) * mask
        elif kernel == "uniform":
            kernel_values = torch.ones_like(u) * mask
        else:  # kernel == "epanechnikov"
            kernel_values = (1. - u ** 2) * mask
    else:
        raise ValueError(f"Kernel must be 'triangular', 'gaussian', " f"'uniform' or 'epanechnikov'. Got {kernel}.")

    # Sum over the spatial dims, move the bin axis last.
    hist = torch.sum(kernel_values, dim=(-2, -1)).permute(1, 2, 0)
    if return_pdf:
        normalization = torch.sum(hist, dim=-1, keepdim=True) + eps
        pdf = hist / normalization
        # Drop the dims that were not present in the input.
        if image.dim() == 2:
            hist = hist.squeeze()
            pdf = pdf.squeeze()
        elif image.dim() == 3:
            hist = hist.squeeze(0)
            pdf = pdf.squeeze(0)
        return hist, pdf

    if image.dim() == 2:
        hist = hist.squeeze()
    elif image.dim() == 3:
        hist = hist.squeeze(0)
    return hist, torch.zeros_like(hist)
| 38.326848 | 116 | 0.63665 | from typing import Optional, Tuple
import torch
def marginal_pdf(
    values: torch.Tensor, bins: torch.Tensor, sigma: torch.Tensor, epsilon: float = 1e-10
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Kernel-density estimate of the marginal PDF of `values` over `bins`.

    Args:
        values: samples of shape [B, N, 1].
        bins: bin centers of shape [NUM_BINS].
        sigma: scalar tensor, Gaussian smoothing factor.
        epsilon: small constant for numerical stability.

    Returns:
        pdf of shape [B, NUM_BINS] and the Gaussian kernel values of
        shape [B, N, NUM_BINS].
    """
    if not isinstance(values, torch.Tensor):
        raise TypeError(f"Input values type is not a torch.Tensor. Got {type(values)}")
    if not isinstance(bins, torch.Tensor):
        raise TypeError(f"Input bins type is not a torch.Tensor. Got {type(bins)}")
    if not isinstance(sigma, torch.Tensor):
        raise TypeError(f"Input sigma type is not a torch.Tensor. Got {type(sigma)}")
    if not values.dim() == 3:
        raise ValueError("Input values must be a of the shape BxNx1." " Got {}".format(values.shape))
    if not bins.dim() == 1:
        raise ValueError("Input bins must be a of the shape NUM_BINS" " Got {}".format(bins.shape))
    if not sigma.dim() == 0:
        raise ValueError("Input sigma must be a of the shape 1" " Got {}".format(sigma.shape))
    # Distance of every sample to every bin center: [B, N, NUM_BINS].
    residuals = values - bins.unsqueeze(0).unsqueeze(0)
    # Gaussian kernel evaluated at each residual.
    kernel_values = torch.exp(-0.5 * (residuals / sigma).pow(2))
    # Average over samples, then normalize each batch row to sum to ~1.
    pdf = torch.mean(kernel_values, dim=1)
    normalization = torch.sum(pdf, dim=1).unsqueeze(1) + epsilon
    pdf = pdf / normalization
    return pdf, kernel_values
def joint_pdf(kernel_values1: torch.Tensor, kernel_values2: torch.Tensor, epsilon: float = 1e-10) -> torch.Tensor:
    """Joint PDF estimate from two sets of per-sample kernel values.

    Args:
        kernel_values1: shape [B, N, NUM_BINS].
        kernel_values2: shape [B, N, NUM_BINS] (must match the first).
        epsilon: small constant for numerical stability.

    Returns:
        Normalized joint pdf of shape [B, NUM_BINS, NUM_BINS].
    """
    if not isinstance(kernel_values1, torch.Tensor):
        raise TypeError(f"Input kernel_values1 type is not a torch.Tensor. Got {type(kernel_values1)}")
    if not isinstance(kernel_values2, torch.Tensor):
        raise TypeError(f"Input kernel_values2 type is not a torch.Tensor. Got {type(kernel_values2)}")
    if not kernel_values1.dim() == 3:
        raise ValueError("Input kernel_values1 must be a of the shape BxN." " Got {}".format(kernel_values1.shape))
    if not kernel_values2.dim() == 3:
        raise ValueError("Input kernel_values2 must be a of the shape BxN." " Got {}".format(kernel_values2.shape))
    if kernel_values1.shape != kernel_values2.shape:
        raise ValueError(
            "Inputs kernel_values1 and kernel_values2 must have the same shape."
            " Got {} and {}".format(kernel_values1.shape, kernel_values2.shape)
        )
    # Outer product over the sample axis: [B, NUM_BINS, NUM_BINS].
    joint_kernel_values = torch.matmul(kernel_values1.transpose(1, 2), kernel_values2)
    # Normalize each batch element so the joint pdf sums to ~1.
    normalization = torch.sum(joint_kernel_values, dim=(1, 2)).view(-1, 1, 1) + epsilon
    pdf = joint_kernel_values / normalization
    return pdf
def histogram(x: torch.Tensor, bins: torch.Tensor, bandwidth: torch.Tensor, epsilon: float = 1e-10) -> torch.Tensor:
    """KDE histogram of `x` (shape [B, D]) over `bins`; returns [B, N_bins]."""
    pdf, _ = marginal_pdf(x.unsqueeze(2), bins, bandwidth, epsilon)
    return pdf
def histogram2d(
    x1: torch.Tensor, x2: torch.Tensor, bins: torch.Tensor, bandwidth: torch.Tensor, epsilon: float = 1e-10
) -> torch.Tensor:
    """KDE joint histogram of `x1`/`x2` (each [B, D]); returns [B, N_bins, N_bins]."""
    # Only the per-sample kernel values are needed from the marginals.
    _, kernel_values1 = marginal_pdf(x1.unsqueeze(2), bins, bandwidth, epsilon)
    _, kernel_values2 = marginal_pdf(x2.unsqueeze(2), bins, bandwidth, epsilon)
    pdf = joint_pdf(kernel_values1, kernel_values2)
    return pdf
def image_histogram2d(
    image: torch.Tensor,
    min: float = 0.0,
    max: float = 255.0,
    n_bins: int = 256,
    bandwidth: Optional[float] = None,
    centers: Optional[torch.Tensor] = None,
    return_pdf: bool = False,
    kernel: str = "triangular",
    eps: float = 1e-10,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """KDE histogram of image(s) of shape (H, W), (C, H, W) or (B, C, H, W).

    Returns the histogram and either the per-bin pdf (when return_pdf)
    or zeros of the same shape.
    """
    if image is not None and not isinstance(image, torch.Tensor):
        raise TypeError(f"Input image type is not a torch.Tensor. Got {type(image)}.")
    if centers is not None and not isinstance(centers, torch.Tensor):
        raise TypeError(f"Bins' centers type is not a torch.Tensor. Got {type(centers)}.")
    if centers is not None and len(centers.shape) > 0 and centers.dim() != 1:
        raise ValueError(f"Bins' centers must be a torch.Tensor of the shape (n_bins,). Got {centers.shape}.")
    if not isinstance(min, float):
        raise TypeError(f'Type of lower end of the range is not a float. Got {type(min)}.')
    if not isinstance(max, float):
        # NOTE(review): message reports type(min) but the check is on max —
        # looks like it should be type(max); confirm and fix.
        raise TypeError(f"Type of upper end of the range is not a float. Got {type(min)}.")
    if not isinstance(n_bins, int):
        raise TypeError(f"Type of number of bins is not an int. Got {type(n_bins)}.")
    if bandwidth is not None and not isinstance(bandwidth, float):
        raise TypeError(f"Bandwidth type is not a float. Got {type(bandwidth)}.")
    if not isinstance(return_pdf, bool):
        raise TypeError(f"Return_pdf type is not a bool. Got {type(return_pdf)}.")
    if bandwidth is None:
        bandwidth = (max - min) / n_bins
    if centers is None:
        # Centers of equal-width bins covering [min, max].
        centers = min + bandwidth * (torch.arange(n_bins, device=image.device, dtype=image.dtype) + 0.5)
    # Shape (n_bins, 1, 1, 1, 1) so it broadcasts against any image rank.
    centers = centers.reshape(-1, 1, 1, 1, 1)
    # Normalized distance of every pixel to every bin center.
    u = torch.abs(image.unsqueeze(0) - centers) / bandwidth
    if kernel == "gaussian":
        kernel_values = torch.exp(-0.5 * u ** 2)
    elif kernel in ("triangular", "uniform", "epanechnikov",):
        # Mask of pixels within one bandwidth of the bin center.
        mask = (u <= 1).to(u.dtype)
        if kernel == "triangular":
            kernel_values = (1. - u) * mask
        elif kernel == "uniform":
            kernel_values = torch.ones_like(u) * mask
        else:
            # kernel == "epanechnikov"
            kernel_values = (1. - u ** 2) * mask
    else:
        raise ValueError(f"Kernel must be 'triangular', 'gaussian', " f"'uniform' or 'epanechnikov'. Got {kernel}.")
    # Sum over the spatial dims, move the bin axis last.
    hist = torch.sum(kernel_values, dim=(-2, -1)).permute(1, 2, 0)
    if return_pdf:
        normalization = torch.sum(hist, dim=-1, keepdim=True) + eps
        pdf = hist / normalization
        # Drop the dims that were not present in the input.
        if image.dim() == 2:
            hist = hist.squeeze()
            pdf = pdf.squeeze()
        elif image.dim() == 3:
            hist = hist.squeeze(0)
            pdf = pdf.squeeze(0)
        return hist, pdf
    if image.dim() == 2:
        hist = hist.squeeze()
    elif image.dim() == 3:
        hist = hist.squeeze(0)
    return hist, torch.zeros_like(hist)
| true | true |
f71ef9efed3b82abc8f4e9d4d5ce1687926182ce | 2,633 | py | Python | util/annotate-image.py | sasile/openface | 5946f37d6045820dfa4e81ba2baac719563abc87 | [
"Apache-2.0"
] | 31 | 2016-05-27T21:27:06.000Z | 2021-07-19T16:11:43.000Z | util/annotate-image.py | sasile/openface | 5946f37d6045820dfa4e81ba2baac719563abc87 | [
"Apache-2.0"
] | 2 | 2021-04-30T20:33:54.000Z | 2022-02-11T03:39:24.000Z | util/annotate-image.py | sasile/openface | 5946f37d6045820dfa4e81ba2baac719563abc87 | [
"Apache-2.0"
] | 20 | 2016-01-26T00:38:09.000Z | 2020-02-06T18:51:55.000Z | #!/usr/bin/env python2
#
# Copyright 2015-2016 Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: This file is incomplete.
import os
import sys
fileDir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(fileDir, ".."))
import argparse
import cv2
# import openface.helper
from openface.alignment import NaiveDlib
modelDir = os.path.join(fileDir, '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
def main(args):
    """Detect the largest face in args.img, draw its bounding box and
    landmarks, and save the result to annotated.png."""
    align = NaiveDlib(args.dlibFacePredictor)
    bgrImg = cv2.imread(args.img)
    if bgrImg is None:
        raise Exception("Unable to load image: {}".format(args.img))
    # dlib expects RGB ordering; OpenCV loads BGR.
    rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
    bb = align.getLargestFaceBoundingBox(rgbImg)
    if bb is None:
        raise Exception("Unable to find a face: {}".format(args.img))
    landmarks = align.align(rgbImg, bb)
    if landmarks is None:
        raise Exception("Unable to align image: {}".format(args.img))
    # alignedFace = align.alignImg("affine", args.size, rgbImg, bb, landmarks)
    bl = (bb.left(), bb.bottom())
    tr = (bb.right(), bb.top())
    # Draw on the original BGR image so the saved PNG has correct colors.
    cv2.rectangle(bgrImg, bl, tr, color=(153, 255, 204), thickness=3)
    for landmark in landmarks:
        cv2.circle(bgrImg, center=landmark, radius=3, color=(102, 204, 255), thickness=-1)
    print("Saving image to 'annotated.png'")
    cv2.imwrite("annotated.png", bgrImg)
if __name__ == '__main__':
    # CLI: annotate-image.py IMG LANDMARKS [--dlibFacePredictor PATH] [--size N]
    parser = argparse.ArgumentParser()
    parser.add_argument('img', type=str, help="Input image.")
    parser.add_argument('--dlibFacePredictor', type=str, help="Path to dlib's face predictor.",
                        default=os.path.join(dlibModelDir, "shape_predictor_68_face_landmarks.dat"))
    # NOTE(review): 'landmarks' is parsed but never read by main() — confirm intent.
    parser.add_argument('landmarks', type=str,
                        choices=['outerEyesAndNose', 'innerEyesAndBottomLip'],
                        help='The landmarks to align to.')
    parser.add_argument('--size', type=int, help="Default image size.",
                        default=96)
    args = parser.parse_args()
    main(args)
| 34.644737 | 100 | 0.684011 |
import os
import sys
fileDir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(fileDir, ".."))
import argparse
import cv2
from openface.alignment import NaiveDlib
modelDir = os.path.join(fileDir, '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
def main(args):
    """Annotate the largest detected face in args.img and write the
    result to annotated.png."""
    aligner = NaiveDlib(args.dlibFacePredictor)

    image_bgr = cv2.imread(args.img)
    if image_bgr is None:
        raise Exception("Unable to load image: {}".format(args.img))
    # dlib works on RGB; OpenCV loads BGR.
    image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)

    face_box = aligner.getLargestFaceBoundingBox(image_rgb)
    if face_box is None:
        raise Exception("Unable to find a face: {}".format(args.img))
    points = aligner.align(image_rgb, face_box)
    if points is None:
        raise Exception("Unable to align image: {}".format(args.img))

    bottom_left = (face_box.left(), face_box.bottom())
    top_right = (face_box.right(), face_box.top())
    cv2.rectangle(image_bgr, bottom_left, top_right, color=(153, 255, 204), thickness=3)
    for point in points:
        cv2.circle(image_bgr, center=point, radius=3, color=(102, 204, 255), thickness=-1)

    print("Saving image to 'annotated.png'")
    cv2.imwrite("annotated.png", image_bgr)
if __name__ == '__main__':
    # CLI: annotate-image.py IMG LANDMARKS [--dlibFacePredictor PATH] [--size N]
    parser = argparse.ArgumentParser()
    parser.add_argument('img', type=str, help="Input image.")
    parser.add_argument('--dlibFacePredictor', type=str, help="Path to dlib's face predictor.",
                        default=os.path.join(dlibModelDir, "shape_predictor_68_face_landmarks.dat"))
    # NOTE(review): 'landmarks' is parsed but never read by main() — confirm intent.
    parser.add_argument('landmarks', type=str,
                        choices=['outerEyesAndNose', 'innerEyesAndBottomLip'],
                        help='The landmarks to align to.')
    parser.add_argument('--size', type=int, help="Default image size.",
                        default=96)
    args = parser.parse_args()
    main(args)
| true | true |
f71efa967077a2e3a9d5fff7e6ea1107f61cda2a | 579 | py | Python | arkav_is_api/quiz/migrations/0016_auto_20181214_0043.py | arkavidia5/arkav-is | 6c6e8d091ead5bfff664d86f7903c62209800031 | [
"MIT"
] | 3 | 2018-10-01T16:30:15.000Z | 2020-01-06T09:03:57.000Z | arkav_is_api/quiz/migrations/0016_auto_20181214_0043.py | arkavidia5/arkav-is | 6c6e8d091ead5bfff664d86f7903c62209800031 | [
"MIT"
] | 4 | 2018-11-03T10:56:52.000Z | 2020-04-26T06:54:16.000Z | arkav_is_api/quiz/migrations/0016_auto_20181214_0043.py | arkavidia5/arkav-is | 6c6e8d091ead5bfff664d86f7903c62209800031 | [
"MIT"
] | 2 | 2018-09-26T16:28:01.000Z | 2019-09-04T06:23:14.000Z | # Generated by Django 2.1.4 on 2018-12-13 17:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make QuizAttempt.finish_time and start_time nullable and blankable."""

    dependencies = [
        ('quiz', '0015_auto_20181209_0433'),
    ]
    operations = [
        # Allow attempts that have not finished yet.
        migrations.AlterField(
            model_name='quizattempt',
            name='finish_time',
            field=models.DateTimeField(blank=True, null=True),
        ),
        # Allow attempts that have not started yet.
        migrations.AlterField(
            model_name='quizattempt',
            name='start_time',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
| 24.125 | 62 | 0.592401 |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make QuizAttempt.finish_time and start_time nullable and blankable."""

    dependencies = [
        ('quiz', '0015_auto_20181209_0433'),
    ]
    operations = [
        # Allow attempts that have not finished yet.
        migrations.AlterField(
            model_name='quizattempt',
            name='finish_time',
            field=models.DateTimeField(blank=True, null=True),
        ),
        # Allow attempts that have not started yet.
        migrations.AlterField(
            model_name='quizattempt',
            name='start_time',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
| true | true |
f71efa9e075feb99d16e9b1180b94b2da938dc88 | 1,531 | py | Python | batsky/batsky_sim.py | oar-team/batsky | 3e9f14a4bdaac56d934c31f888858ac7a9f645c8 | [
"BSD-3-Clause"
] | null | null | null | batsky/batsky_sim.py | oar-team/batsky | 3e9f14a4bdaac56d934c31f888858ac7a9f645c8 | [
"BSD-3-Clause"
] | null | null | null | batsky/batsky_sim.py | oar-team/batsky | 3e9f14a4bdaac56d934c31f888858ac7a9f645c8 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import zmq
import click
import logging
BATSIM_PORT = 28000
@click.command()
@click.option('-d', '--debug', is_flag=True, help='Debug flag.')
@click.option('-l', '--logfile', type=click.STRING, help='Specify log file.')
@click.option('-c', '--controller', type=click.STRING, help='Specify which hostname is the controller.')
@click.option('-f', '--workload-file', type=click.STRING, help='Specify workload file.')
# BUG FIX: `click.float` does not exist (AttributeError at import time);
# the builtin `float` is the correct option type.
@click.option('-s', '--start-time', type=float, default=0.0,
              help='Specify start time of simulation')
# BUG FIX: the '-s/--start-time' option had no matching parameter, so click
# raised TypeError on every invocation; it is now accepted (same default).
def cli(debug, logfile, controller, workload_file, start_time=0.0):
    # BUG FIX: `logger` was referenced but never created (NameError at runtime).
    logger = logging.getLogger('batsky-sim')
    if debug:
        logger.setLevel(logging.DEBUG)
    # Log either to the requested file or to the console.
    formatter = logging.Formatter('%(asctime)s %(levelname)-6s %(message)s')
    if logfile:
        fh = logging.FileHandler(logfile)
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(formatter)
        logger.addHandler(fh)
    else:
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    logger.info('Simple delay job only bat-simulator')
    # Batsim-side endpoint: a PAIR socket the simulator connects to.
    context = zmq.Context()
    batsim_sock = context.socket(zmq.PAIR)
    batsim_sock.bind("tcp://*:{}".format(BATSIM_PORT))
    # controller_sock.send_json({'wjid': int(workload_jobid), 'nodeset': str(nodeset),
    #                            'port': int(finalize_port)})
if __name__ == '__main__':
    cli()
| 32.574468 | 104 | 0.639451 |
import zmq
import click
import logging
BATSIM_PORT = 28000
# NOTE(review): three latent defects in this command (left as-is here):
#   * `click.float` does not exist (should be the builtin `float`) -> AttributeError at import.
#   * the '-s/--start-time' option has no matching `start_time` parameter -> TypeError when invoked.
#   * `logger` is never defined in this module -> NameError at runtime.
@click.command()
@click.option('-d', '--debug', is_flag=True, help='Debug flag.')
@click.option('-l', '--logfile', type=click.STRING, help='Specify log file.')
@click.option('-c', '--controller', type=click.STRING, help='Specify which hostname is the controller.')
@click.option('-f', '--workload-file', type=click.STRING, help='Specify workload file.')
@click.option('-s', '--start-time', type=click.float, default=0.0,
              help='Specify start time of simulation')
def cli(debug, logfile, controller, workload_file):
    if debug:
        logger.setLevel(logging.DEBUG)
    # Log to the requested file, otherwise to the console.
    if logfile:
        fh = logging.FileHandler(logfile)
        fh.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s %(levelname)-6s %(message)s')
        fh.setFormatter(formatter)
        logger.addHandler(fh)
    else:
        handler = logging.StreamHandler()
        formatter = logging.Formatter('%(asctime)s %(levelname)-6s %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    logger.info('Simple delay job only bat-simulator')
    # Batsim-side endpoint: a PAIR socket the simulator connects to.
    context = zmq.Context()
    batsim_sock = context.socket(zmq.PAIR)
    batsim_sock.bind("tcp://*:{}".format(BATSIM_PORT))
if __name__ == '__main__':
    cli()
| true | true |
f71efbaaaabe7fbbea180e4b469c59a3c7315938 | 1,719 | py | Python | spider/featurization/load_subclip_audio.py | Rosna/P4ML-UI | edf0dd830588f03b197e4d6532830a5aedd88424 | [
"Apache-2.0"
] | 1 | 2021-11-05T17:42:47.000Z | 2021-11-05T17:42:47.000Z | spider/featurization/load_subclip_audio.py | Rosna/P4ML-UI | edf0dd830588f03b197e4d6532830a5aedd88424 | [
"Apache-2.0"
] | null | null | null | spider/featurization/load_subclip_audio.py | Rosna/P4ML-UI | edf0dd830588f03b197e4d6532830a5aedd88424 | [
"Apache-2.0"
] | 2 | 2019-02-21T18:29:51.000Z | 2019-09-02T21:21:26.000Z | import argparse
import librosa
import numpy as np
def make_subclips(audio, sr, clip_size, pad=True):
    """Split each audio signal into fixed-length subclips.

    Args:
        audio: list of 1-D sample arrays, one per audio file.
        sr: list of sample rates (Hz), parallel to ``audio``.
        clip_size: subclip length in seconds.
        pad: zero-pad the tail so a short final chunk still yields a clip.

    Returns:
        A 2-D list: ``result[i]`` holds the subclips of ``audio[i]``,
        each of ``int(sr[i] * clip_size)`` samples.
    """
    clips = []
    for idx, samples in enumerate(audio):
        # Size of a single clip in samples.
        step = int(sr[idx] * clip_size)
        # Optional padding for short (or uneven-length) clips.
        overhang = len(samples) % step
        if overhang != 0 and pad:
            samples = np.concatenate([samples, np.zeros(step - overhang)])
        subclips = []
        for start in range(0, len(samples), step):
            end = start + step
            if end > len(samples):
                break
            subclips.append(samples[start:end])
        # BUG FIX: `clips` was never filled and `subclips` was returned
        # directly, so only one file's chunks ever came back; now every
        # input file contributes a row, matching the documented 2-D result.
        clips.append(subclips)
    return clips
def main(audio_file, clip_size):
    """Load one audio file at its native sample rate and cut it into
    one-second subclips."""
    # In python 2.7, librosa.load does not correctly handle 24-bit wav files.
    # This is resolved in python 3.x
    #
    # If the sr parameter is set to None, loads the actual sampling rate
    # from the audio file. Otherwise, will load the audio file and resample
    # it to the given sample rate. This is good if you want all audio at the
    # same sample rate, but can be slow. Default is 22050 Hz.
    audio, sr = librosa.load(audio_file, sr=None)
    # We just have one audio file here, but this should work for any number
    # NOTE(review): `clip_size` is accepted but ignored — the 1.0 literal
    # hard-codes one-second clips; confirm whether clip_size should be used.
    audio_subclips = make_subclips([audio], [sr], 1.0)
if __name__ == '__main__':
    # CLI entry point: --audio_file PATH [--clip_size SECONDS]
    parser = argparse.ArgumentParser()
    parser.add_argument('--audio_file', type=str, required=True)
    parser.add_argument('--clip_size', type=float, default=0)
    args = parser.parse_args()
    main(args.audio_file, args.clip_size)
| 33.705882 | 77 | 0.649215 | import argparse
import librosa
import numpy as np
def make_subclips(audio, sr, clip_size, pad=True):
    # Split each signal in `audio` (parallel to sample rates `sr`) into
    # clip_size-second chunks; zero-pad the tail when `pad` is set.
    # NOTE(review): `clips` is never filled and `subclips` is returned
    # directly, so only a single file's chunks ever come back — looks like
    # `clips.append(subclips)` + `return clips` was intended; confirm.
    clips = []
    for idx, a in enumerate(audio):
        # Size of a single clip in samples.
        step = int(sr[idx] * clip_size)
        overhang = len(a) % step
        if overhang != 0 and pad:
            a = np.concatenate([a, np.zeros(step - overhang)])
        subclips = []
        for start in range(0, len(a), step):
            end = start + step
            if end > len(a):
                break
            subclips.append(a[start : end])
    return subclips
def main(audio_file, clip_size):
audio, sr = librosa.load(audio_file, sr=None)
audio_subclips = make_subclips([audio], [sr], 1.0)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--audio_file', type=str, required=True)
parser.add_argument('--clip_size', type=float, default=0)
args = parser.parse_args()
main(args.audio_file, args.clip_size)
| true | true |
f71efc7fd3ff76719e0c49a1d47b97d3eeeeef66 | 13,063 | py | Python | systemvm/patches/debian/config/opt/cloud/bin/cs/CsRedundant.py | vispractice/cloudstack | d543e2aa2c05422559d866c8b2ae29c83bfd5da0 | [
"Apache-2.0"
] | null | null | null | systemvm/patches/debian/config/opt/cloud/bin/cs/CsRedundant.py | vispractice/cloudstack | d543e2aa2c05422559d866c8b2ae29c83bfd5da0 | [
"Apache-2.0"
] | null | null | null | systemvm/patches/debian/config/opt/cloud/bin/cs/CsRedundant.py | vispractice/cloudstack | d543e2aa2c05422559d866c8b2ae29c83bfd5da0 | [
"Apache-2.0"
] | null | null | null | # -- coding: utf-8 --
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -------------------------------------------------------------------- #
# Notes
# -------------------------------------------------------------------- #
# Vrouter
#
# eth0 router gateway IP for isolated network
# eth1 Control IP for hypervisor
# eth2 public ip(s)
#
# VPC Router
#
# eth0 control interface
# eth1 public ip
# eth2+ Guest networks
# -------------------------------------------------------------------- #
import os
import logging
import CsHelper
from CsFile import CsFile
from CsProcess import CsProcess
from CsApp import CsPasswdSvc
from CsAddress import CsDevice
import socket
from time import sleep
class CsRedundant(object):
    """Drive VRRP-based redundancy (keepalived + conntrackd) on a
    CloudStack system-VM router: install templates and configuration,
    and switch the router between master, backup and fault states.
    NOTE: this is Python 2 code (octal literals, `except ..., e`,
    print statement).
    """
    CS_RAMDISK_DIR = "/ramdisk"
    CS_PRIO_UP = 1
    CS_PRIO_DOWN = -1
    CS_ROUTER_DIR = "%s/rrouter" % CS_RAMDISK_DIR
    CS_TEMPLATES = [
        "heartbeat.sh.templ", "check_heartbeat.sh.templ",
        "arping_gateways.sh.templ"
    ]
    CS_TEMPLATES_DIR = "/opt/cloud/templates"
    CONNTRACKD_BIN = "/usr/sbin/conntrackd"
    CONNTRACKD_KEEPALIVED_CONFLOCK = "/var/lock/conntrack.lock"
    CONNTRACKD_CONF = "/etc/conntrackd/conntrackd.conf"
    RROUTER_LOG = "/var/log/cloud.log"
    KEEPALIVED_CONF = "/etc/keepalived/keepalived.conf"
    def __init__(self, config):
        # Cache the parsed command line and the address list from config.
        self.cl = config.cmdline()
        self.address = config.address()
        self.config = config
    def set(self):
        """Turn redundancy on or off according to the command line."""
        logging.debug("Router redundancy status is %s", self.cl.is_redundant())
        if self.cl.is_redundant():
            self._redundant_on()
        else:
            self._redundant_off()
    def _redundant_off(self):
        """Stop the redundancy daemons and remove their runtime state."""
        CsHelper.service("conntrackd", "stop")
        CsHelper.service("keepalived", "stop")
        CsHelper.umount_tmpfs(self.CS_RAMDISK_DIR)
        CsHelper.rmdir(self.CS_RAMDISK_DIR)
        CsHelper.rm(self.CONNTRACKD_CONF)
        CsHelper.rm(self.KEEPALIVED_CONF)
    def _redundant_on(self):
        """Install helper scripts and (re)write keepalived/conntrackd
        configuration plus their cron jobs; restart keepalived when its
        configuration changed or it is not running."""
        guest = self.address.get_guest_if()
        # No redundancy if there is no guest network
        if self.cl.is_master() or guest is None:
            for obj in [o for o in self.address.get_ips() if o.is_public()]:
                self.check_is_up(obj.get_device())
        if guest is None:
            self._redundant_off()
            return
        CsHelper.mkdir(self.CS_RAMDISK_DIR, 0755, False)
        CsHelper.mount_tmpfs(self.CS_RAMDISK_DIR)
        CsHelper.mkdir(self.CS_ROUTER_DIR, 0755, False)
        # Copy helper scripts into the ramdisk, dropping the .templ suffix.
        for s in self.CS_TEMPLATES:
            d = s
            if s.endswith(".templ"):
                d = s.replace(".templ", "")
            CsHelper.copy_if_needed(
                "%s/%s" % (self.CS_TEMPLATES_DIR, s), "%s/%s" % (self.CS_ROUTER_DIR, d))
        CsHelper.copy(
            "%s/%s" % (self.CS_TEMPLATES_DIR, "keepalived.conf.templ"), self.KEEPALIVED_CONF)
        CsHelper.copy_if_needed(
            "%s/%s" % (self.CS_TEMPLATES_DIR, "conntrackd.conf.templ"), self.CONNTRACKD_CONF)
        CsHelper.copy_if_needed(
            "%s/%s" % (self.CS_TEMPLATES_DIR, "checkrouter.sh.templ"), "/opt/cloud/bin/checkrouter.sh")
        # Patch the init script so keepalived runs in VRRP-only mode.
        CsHelper.execute(
            'sed -i "s/--exec\ \$DAEMON;/--exec\ \$DAEMON\ --\ --vrrp;/g" /etc/init.d/keepalived')
        # checkrouter.sh configuration
        check_router = CsFile("/opt/cloud/bin/checkrouter.sh")
        check_router.greplace("[RROUTER_LOG]", self.RROUTER_LOG)
        check_router.commit()
        # keepalived configuration
        keepalived_conf = CsFile(self.KEEPALIVED_CONF)
        keepalived_conf.search(
            " router_id ", " router_id %s" % self.cl.get_name())
        keepalived_conf.search(
            " interface ", " interface %s" % guest.get_device())
        keepalived_conf.search(
            " virtual_router_id ", " virtual_router_id %s" % self.cl.get_router_id())
        keepalived_conf.greplace("[RROUTER_BIN_PATH]", self.CS_ROUTER_DIR)
        keepalived_conf.section("authentication {", "}", [
            " auth_type AH \n", " auth_pass %s\n" % self.cl.get_router_password()])
        keepalived_conf.section(
            "virtual_ipaddress {", "}", self._collect_ips())
        keepalived_conf.commit()
        # conntrackd configuration
        connt = CsFile(self.CONNTRACKD_CONF)
        if guest is not None:
            connt.section("Multicast {", "}", [
                "IPv4_address 225.0.0.50\n",
                "Group 3780\n",
                "IPv4_interface %s\n" % guest.get_ip(),
                "Interface %s\n" % guest.get_device(),
                "SndSocketBuffer 1249280\n",
                "RcvSocketBuffer 1249280\n",
                "Checksum on\n"])
            connt.section("Address Ignore {", "}", self._collect_ignore_ips())
            connt.commit()
        if connt.is_changed():
            CsHelper.service("conntrackd", "restart")
        # Configure heartbeat cron job - runs every 30 seconds
        heartbeat_cron = CsFile("/etc/cron.d/heartbeat")
        heartbeat_cron.add("SHELL=/bin/bash", 0)
        heartbeat_cron.add(
            "PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin", 1)
        heartbeat_cron.add(
            "* * * * * root $SHELL %s/check_heartbeat.sh 2>&1 > /dev/null" % self.CS_ROUTER_DIR, -1)
        heartbeat_cron.add(
            "* * * * * root sleep 30; $SHELL %s/check_heartbeat.sh 2>&1 > /dev/null" % self.CS_ROUTER_DIR, -1)
        heartbeat_cron.commit()
        # Configure KeepaliveD cron job - runs at every reboot
        keepalived_cron = CsFile("/etc/cron.d/keepalived")
        keepalived_cron.add("SHELL=/bin/bash", 0)
        keepalived_cron.add(
            "PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin", 1)
        keepalived_cron.add("@reboot root service keepalived start", -1)
        keepalived_cron.commit()
        # Configure ConntrackD cron job - runs at every reboot
        conntrackd_cron = CsFile("/etc/cron.d/conntrackd")
        conntrackd_cron.add("SHELL=/bin/bash", 0)
        conntrackd_cron.add(
            "PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin", 1)
        conntrackd_cron.add("@reboot root service conntrackd start", -1)
        conntrackd_cron.commit()
        proc = CsProcess(['/usr/sbin/keepalived', '--vrrp'])
        if not proc.find() or keepalived_conf.is_changed():
            CsHelper.service("keepalived", "restart")
    def release_lock(self):
        """Release the master-state lock file (missing file is fine)."""
        try:
            os.remove("/tmp/master_lock")
        except OSError:
            pass
    def set_lock(self):
        """
        Make sure that master state changes happen sequentially
        """
        iterations = 10
        time_between = 1
        for iter in range(0, iterations):
            try:
                # Binding a unix socket at the lock path fails while another
                # process holds it, so the bind doubles as a mutex acquire.
                s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                s.bind('/tmp/master_lock')
                return s
            except socket.error, e:
                error_code = e.args[0]
                error_string = e.args[1]
                print "Process already running (%d:%s). Exiting" % (error_code, error_string)
                logging.info("Master is already running, waiting")
                sleep(time_between)
    def set_fault(self):
        """ Set fault mode on this router """
        if not self.cl.is_redundant():
            logging.error("Set fault called on non-redundant router")
            return
        self.set_lock()
        logging.info("Router switched to fault mode")
        # Take public interfaces down and stop the services that should
        # only run on a healthy router.
        ads = [o for o in self.address.get_ips() if o.is_public()]
        for o in ads:
            CsHelper.execute("ifconfig %s down" % o.get_device())
        cmd = "%s -C %s" % (self.CONNTRACKD_BIN, self.CONNTRACKD_CONF)
        CsHelper.execute("%s -s" % cmd)
        CsHelper.service("ipsec", "stop")
        CsHelper.service("xl2tpd", "stop")
        CsHelper.service("dnsmasq", "stop")
        ads = [o for o in self.address.get_ips() if o.needs_vrrp()]
        for o in ads:
            CsPasswdSvc(o.get_gateway()).stop()
        self.cl.set_fault_state()
        self.cl.save()
        self.release_lock()
        logging.info("Router switched to fault mode")
    def set_backup(self):
        """ Set the current router to backup """
        if not self.cl.is_redundant():
            logging.error("Set backup called on non-redundant router")
            return
        self.set_lock()
        logging.debug("Setting router to backup")
        ads = [o for o in self.address.get_ips() if o.is_public()]
        # `dev` guards against issuing "link down" twice for the same device
        # when several public IPs share one interface (assumes the list is
        # grouped by device -- TODO confirm).
        dev = ''
        for o in ads:
            if dev == o.get_device():
                continue
            logging.info("Bringing public interface %s down" % o.get_device())
            cmd2 = "ip link set %s down" % o.get_device()
            CsHelper.execute(cmd2)
            dev = o.get_device()
        cmd = "%s -C %s" % (self.CONNTRACKD_BIN, self.CONNTRACKD_CONF)
        CsHelper.execute("%s -d" % cmd)
        CsHelper.service("ipsec", "stop")
        CsHelper.service("xl2tpd", "stop")
        ads = [o for o in self.address.get_ips() if o.needs_vrrp()]
        for o in ads:
            CsPasswdSvc(o.get_gateway()).stop()
        CsHelper.service("dnsmasq", "stop")
        self.cl.set_master_state(False)
        self.cl.save()
        self.release_lock()
        logging.info("Router switched to backup mode")
    def set_master(self):
        """ Set the current router to master """
        if not self.cl.is_redundant():
            logging.error("Set master called on non-redundant router")
            return
        self.set_lock()
        logging.debug("Setting router to master")
        self.address.process()
        logging.info("added default routes")
        # ip route add default via $gw table Table_$dev proto static
        # conntrackd client commands: -c commit, -f flush, -R resync,
        # -B send bulk update (per conntrackd(8)).
        cmd = "%s -C %s" % (self.CONNTRACKD_BIN, self.CONNTRACKD_CONF)
        CsHelper.execute("%s -c" % cmd)
        CsHelper.execute("%s -f" % cmd)
        CsHelper.execute("%s -R" % cmd)
        CsHelper.execute("%s -B" % cmd)
        CsHelper.service("ipsec", "restart")
        CsHelper.service("xl2tpd", "restart")
        ads = [o for o in self.address.get_ips() if o.needs_vrrp()]
        for o in ads:
            CsPasswdSvc(o.get_gateway()).restart()
        CsHelper.service("dnsmasq", "restart")
        self.cl.set_master_state(True)
        self.cl.save()
        self.release_lock()
        logging.info("Router switched to master mode")
    def _collect_ignore_ips(self):
        """
        This returns a list of ip objects that should be ignored
        by conntrackd
        """
        lines = []
        lines.append("\t\t\tIPv4_address %s\n" % "127.0.0.1")
        lines.append("\t\t\tIPv4_address %s\n" %
                     self.address.get_control_if().get_ip())
        # FIXME - Do we need to also add any internal network gateways?
        return lines
    def _collect_ips(self):
        """
        Construct a list containing all the ips that need to be looked afer by vrrp
        This is based upon the address_needs_vrrp method in CsAddress which looks at
        the network type and decides if it is an internal address or an external one
        In a DomR there will only ever be one address in a VPC there can be many
        The new code also gives the possibility to cloudstack to have a hybrid device
        thet could function as a router and VPC router at the same time
        """
        lines = []
        for o in self.address.get_ips():
            if o.needs_vrrp():
                cmdline=self.config.get_cmdline_instance()
                # Plain routers advertise the guest gateway; VPC routers
                # advertise the address itself.
                if(cmdline.get_type()=='router'):
                    str = " %s brd %s dev %s\n" % (cmdline.get_guest_gw(), o.get_broadcast(), o.get_device())
                else:
                    str = " %s brd %s dev %s\n" % (o.get_ip(), o.get_broadcast(), o.get_device())
                lines.append(str)
                self.check_is_up(o.get_device())
        return lines
    def check_is_up(self, device):
        """ Ensure device is up """
        cmd = "ip link show %s | grep 'state DOWN'" % device
        for i in CsHelper.execute(cmd):
            if " DOWN " in i:
                cmd2 = "ip link set %s up" % device
                CsHelper.execute(cmd2)
| 40.193846 | 117 | 0.586695 |
import os
import logging
import CsHelper
from CsFile import CsFile
from CsProcess import CsProcess
from CsApp import CsPasswdSvc
from CsAddress import CsDevice
import socket
from time import sleep
class CsRedundant(object):
CS_RAMDISK_DIR = "/ramdisk"
CS_PRIO_UP = 1
CS_PRIO_DOWN = -1
CS_ROUTER_DIR = "%s/rrouter" % CS_RAMDISK_DIR
CS_TEMPLATES = [
"heartbeat.sh.templ", "check_heartbeat.sh.templ",
"arping_gateways.sh.templ"
]
CS_TEMPLATES_DIR = "/opt/cloud/templates"
CONNTRACKD_BIN = "/usr/sbin/conntrackd"
CONNTRACKD_KEEPALIVED_CONFLOCK = "/var/lock/conntrack.lock"
CONNTRACKD_CONF = "/etc/conntrackd/conntrackd.conf"
RROUTER_LOG = "/var/log/cloud.log"
KEEPALIVED_CONF = "/etc/keepalived/keepalived.conf"
def __init__(self, config):
self.cl = config.cmdline()
self.address = config.address()
self.config = config
def set(self):
logging.debug("Router redundancy status is %s", self.cl.is_redundant())
if self.cl.is_redundant():
self._redundant_on()
else:
self._redundant_off()
def _redundant_off(self):
CsHelper.service("conntrackd", "stop")
CsHelper.service("keepalived", "stop")
CsHelper.umount_tmpfs(self.CS_RAMDISK_DIR)
CsHelper.rmdir(self.CS_RAMDISK_DIR)
CsHelper.rm(self.CONNTRACKD_CONF)
CsHelper.rm(self.KEEPALIVED_CONF)
def _redundant_on(self):
guest = self.address.get_guest_if()
if self.cl.is_master() or guest is None:
for obj in [o for o in self.address.get_ips() if o.is_public()]:
self.check_is_up(obj.get_device())
if guest is None:
self._redundant_off()
return
CsHelper.mkdir(self.CS_RAMDISK_DIR, 0755, False)
CsHelper.mount_tmpfs(self.CS_RAMDISK_DIR)
CsHelper.mkdir(self.CS_ROUTER_DIR, 0755, False)
for s in self.CS_TEMPLATES:
d = s
if s.endswith(".templ"):
d = s.replace(".templ", "")
CsHelper.copy_if_needed(
"%s/%s" % (self.CS_TEMPLATES_DIR, s), "%s/%s" % (self.CS_ROUTER_DIR, d))
CsHelper.copy(
"%s/%s" % (self.CS_TEMPLATES_DIR, "keepalived.conf.templ"), self.KEEPALIVED_CONF)
CsHelper.copy_if_needed(
"%s/%s" % (self.CS_TEMPLATES_DIR, "conntrackd.conf.templ"), self.CONNTRACKD_CONF)
CsHelper.copy_if_needed(
"%s/%s" % (self.CS_TEMPLATES_DIR, "checkrouter.sh.templ"), "/opt/cloud/bin/checkrouter.sh")
CsHelper.execute(
'sed -i "s/--exec\ \$DAEMON;/--exec\ \$DAEMON\ --\ --vrrp;/g" /etc/init.d/keepalived')
check_router = CsFile("/opt/cloud/bin/checkrouter.sh")
check_router.greplace("[RROUTER_LOG]", self.RROUTER_LOG)
check_router.commit()
keepalived_conf = CsFile(self.KEEPALIVED_CONF)
keepalived_conf.search(
" router_id ", " router_id %s" % self.cl.get_name())
keepalived_conf.search(
" interface ", " interface %s" % guest.get_device())
keepalived_conf.search(
" virtual_router_id ", " virtual_router_id %s" % self.cl.get_router_id())
keepalived_conf.greplace("[RROUTER_BIN_PATH]", self.CS_ROUTER_DIR)
keepalived_conf.section("authentication {", "}", [
" auth_type AH \n", " auth_pass %s\n" % self.cl.get_router_password()])
keepalived_conf.section(
"virtual_ipaddress {", "}", self._collect_ips())
keepalived_conf.commit()
connt = CsFile(self.CONNTRACKD_CONF)
if guest is not None:
connt.section("Multicast {", "}", [
"IPv4_address 225.0.0.50\n",
"Group 3780\n",
"IPv4_interface %s\n" % guest.get_ip(),
"Interface %s\n" % guest.get_device(),
"SndSocketBuffer 1249280\n",
"RcvSocketBuffer 1249280\n",
"Checksum on\n"])
connt.section("Address Ignore {", "}", self._collect_ignore_ips())
connt.commit()
if connt.is_changed():
CsHelper.service("conntrackd", "restart")
heartbeat_cron = CsFile("/etc/cron.d/heartbeat")
heartbeat_cron.add("SHELL=/bin/bash", 0)
heartbeat_cron.add(
"PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin", 1)
heartbeat_cron.add(
"* * * * * root $SHELL %s/check_heartbeat.sh 2>&1 > /dev/null" % self.CS_ROUTER_DIR, -1)
heartbeat_cron.add(
"* * * * * root sleep 30; $SHELL %s/check_heartbeat.sh 2>&1 > /dev/null" % self.CS_ROUTER_DIR, -1)
heartbeat_cron.commit()
keepalived_cron = CsFile("/etc/cron.d/keepalived")
keepalived_cron.add("SHELL=/bin/bash", 0)
keepalived_cron.add(
"PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin", 1)
keepalived_cron.add("@reboot root service keepalived start", -1)
keepalived_cron.commit()
conntrackd_cron = CsFile("/etc/cron.d/conntrackd")
conntrackd_cron.add("SHELL=/bin/bash", 0)
conntrackd_cron.add(
"PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin", 1)
conntrackd_cron.add("@reboot root service conntrackd start", -1)
conntrackd_cron.commit()
proc = CsProcess(['/usr/sbin/keepalived', '--vrrp'])
if not proc.find() or keepalived_conf.is_changed():
CsHelper.service("keepalived", "restart")
def release_lock(self):
try:
os.remove("/tmp/master_lock")
except OSError:
pass
def set_lock(self):
"""
Make sure that master state changes happen sequentially
"""
iterations = 10
time_between = 1
for iter in range(0, iterations):
try:
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.bind('/tmp/master_lock')
return s
except socket.error, e:
error_code = e.args[0]
error_string = e.args[1]
print "Process already running (%d:%s). Exiting" % (error_code, error_string)
logging.info("Master is already running, waiting")
sleep(time_between)
def set_fault(self):
""" Set fault mode on this router """
if not self.cl.is_redundant():
logging.error("Set fault called on non-redundant router")
return
self.set_lock()
logging.info("Router switched to fault mode")
ads = [o for o in self.address.get_ips() if o.is_public()]
for o in ads:
CsHelper.execute("ifconfig %s down" % o.get_device())
cmd = "%s -C %s" % (self.CONNTRACKD_BIN, self.CONNTRACKD_CONF)
CsHelper.execute("%s -s" % cmd)
CsHelper.service("ipsec", "stop")
CsHelper.service("xl2tpd", "stop")
CsHelper.service("dnsmasq", "stop")
ads = [o for o in self.address.get_ips() if o.needs_vrrp()]
for o in ads:
CsPasswdSvc(o.get_gateway()).stop()
self.cl.set_fault_state()
self.cl.save()
self.release_lock()
logging.info("Router switched to fault mode")
def set_backup(self):
""" Set the current router to backup """
if not self.cl.is_redundant():
logging.error("Set backup called on non-redundant router")
return
self.set_lock()
logging.debug("Setting router to backup")
ads = [o for o in self.address.get_ips() if o.is_public()]
dev = ''
for o in ads:
if dev == o.get_device():
continue
logging.info("Bringing public interface %s down" % o.get_device())
cmd2 = "ip link set %s down" % o.get_device()
CsHelper.execute(cmd2)
dev = o.get_device()
cmd = "%s -C %s" % (self.CONNTRACKD_BIN, self.CONNTRACKD_CONF)
CsHelper.execute("%s -d" % cmd)
CsHelper.service("ipsec", "stop")
CsHelper.service("xl2tpd", "stop")
ads = [o for o in self.address.get_ips() if o.needs_vrrp()]
for o in ads:
CsPasswdSvc(o.get_gateway()).stop()
CsHelper.service("dnsmasq", "stop")
self.cl.set_master_state(False)
self.cl.save()
self.release_lock()
logging.info("Router switched to backup mode")
def set_master(self):
""" Set the current router to master """
if not self.cl.is_redundant():
logging.error("Set master called on non-redundant router")
return
self.set_lock()
logging.debug("Setting router to master")
self.address.process()
logging.info("added default routes")
cmd = "%s -C %s" % (self.CONNTRACKD_BIN, self.CONNTRACKD_CONF)
CsHelper.execute("%s -c" % cmd)
CsHelper.execute("%s -f" % cmd)
CsHelper.execute("%s -R" % cmd)
CsHelper.execute("%s -B" % cmd)
CsHelper.service("ipsec", "restart")
CsHelper.service("xl2tpd", "restart")
ads = [o for o in self.address.get_ips() if o.needs_vrrp()]
for o in ads:
CsPasswdSvc(o.get_gateway()).restart()
CsHelper.service("dnsmasq", "restart")
self.cl.set_master_state(True)
self.cl.save()
self.release_lock()
logging.info("Router switched to master mode")
def _collect_ignore_ips(self):
"""
This returns a list of ip objects that should be ignored
by conntrackd
"""
lines = []
lines.append("\t\t\tIPv4_address %s\n" % "127.0.0.1")
lines.append("\t\t\tIPv4_address %s\n" %
self.address.get_control_if().get_ip())
return lines
def _collect_ips(self):
"""
Construct a list containing all the ips that need to be looked afer by vrrp
This is based upon the address_needs_vrrp method in CsAddress which looks at
the network type and decides if it is an internal address or an external one
In a DomR there will only ever be one address in a VPC there can be many
The new code also gives the possibility to cloudstack to have a hybrid device
thet could function as a router and VPC router at the same time
"""
lines = []
for o in self.address.get_ips():
if o.needs_vrrp():
cmdline=self.config.get_cmdline_instance()
if(cmdline.get_type()=='router'):
str = " %s brd %s dev %s\n" % (cmdline.get_guest_gw(), o.get_broadcast(), o.get_device())
else:
str = " %s brd %s dev %s\n" % (o.get_ip(), o.get_broadcast(), o.get_device())
lines.append(str)
self.check_is_up(o.get_device())
return lines
def check_is_up(self, device):
""" Ensure device is up """
cmd = "ip link show %s | grep 'state DOWN'" % device
for i in CsHelper.execute(cmd):
if " DOWN " in i:
cmd2 = "ip link set %s up" % device
CsHelper.execute(cmd2)
| false | true |
f71efc84499c92428e12278751bbf13f4217a6f2 | 3,011 | py | Python | pyrl/esper_ext.py | abesto/pyrl | 34eb9843217ed5b557bff99ed66ef46b49b5c295 | [
"MIT"
] | 10 | 2019-12-03T03:59:41.000Z | 2021-04-13T11:52:20.000Z | pyrl/esper_ext.py | abesto/pyrl | 34eb9843217ed5b557bff99ed66ef46b49b5c295 | [
"MIT"
] | 1 | 2021-04-06T03:28:02.000Z | 2021-05-31T09:34:48.000Z | pyrl/esper_ext.py | abesto/pyrl | 34eb9843217ed5b557bff99ed66ef46b49b5c295 | [
"MIT"
] | 1 | 2019-12-03T02:50:02.000Z | 2019-12-03T02:50:02.000Z | #!/usr/bin/env python
from typing import Any, Dict, Optional, Type, TypeVar
import esper # type: ignore
import tabulate
T = TypeVar("T")
class WorldExt(esper.World):
    """An esper World extended with a simple resource registry.

    Resources and components may override the type they are registered
    under by exposing a ``resource_type`` / ``component_type`` attribute.
    """

    def __init__(self, timed=False):
        super(WorldExt, self).__init__(timed)
        # Maps a resource type to the single registered instance.
        self.resources: Dict = {}

    def _process(self, *args, **kwargs):
        for processor in self._processors:
            processor.process(*args, **kwargs)

    def add_processors(self, *processors: "Processor"):
        for processor in processors:
            self.add_processor(processor)

    def add_resource(self, resource: T) -> None:
        # Allow an instance to masquerade as another type via resource_type.
        resource_type = getattr(resource, "resource_type", type(resource))
        self.resources[resource_type] = resource

    def get_resource(self, resource_type: Type[T]) -> T:
        """Return the registered resource; raises KeyError if absent."""
        return self.resources[resource_type]

    def try_resource(self, resource_type: Type[T]) -> Optional[T]:
        """Return the registered resource, or None if absent."""
        return self.resources.get(resource_type, None)

    def remove_resource(self, resource_type: Type[T]) -> None:
        # Fixed annotation: callers pass the resource *type*, not an instance.
        if resource_type in self.resources:
            del self.resources[resource_type]

    def add_component(self, entity: int, component_instance: Any) -> None:
        """Add a new Component instance to an Entity.

        Copy-paste from Esper, but supports overriding the component type.

        :param entity: The Entity to associate the Component with.
        :param component_instance: A Component instance.
        """
        component_type = getattr(
            component_instance, "component_type", type(component_instance)
        )
        if component_type not in self._components:
            self._components[component_type] = set()
        self._components[component_type].add(entity)
        if entity not in self._entities:
            self._entities[entity] = {}
        self._entities[entity][component_type] = component_instance
        self.clear_cache()

    def add_components(self, entity: int, *component_instances: Any) -> None:
        for component in component_instances:
            self.add_component(entity, component)

    def try_component(self, entity: int, component_type: Type[T]) -> Optional[T]:
        """Try to get a single component type for an Entity.

        Copy-paste from Esper, except it returns instead of yielding
        (yielding makes no sense here); None when the entity lacks it.
        """
        return self._entities[entity].get(component_type)
class Processor(esper.Processor):
    # Narrow the attribute type so subclasses can use WorldExt's
    # resource helpers without casting.
    world: WorldExt
    def process(self, *args, **kwargs) -> None:
        # Abstract hook: concrete processors must override this.
        raise NotImplementedError
def debug_world(world: esper.World, *with_components: Type[Any]) -> None:
    """Print a table of every entity carrying all *with_components*,
    one column per component type the entity owns."""
    rows = []
    for entity, _ in world.get_components(*with_components):
        row = {"ent": entity}
        for component in world.components_for_entity(entity):
            row[str(type(component))] = component
        rows.append(row)
    print(tabulate.tabulate(rows))
| 33.087912 | 81 | 0.659582 |
from typing import Any, Dict, Optional, Type, TypeVar
import esper
import tabulate
T = TypeVar("T")
class WorldExt(esper.World):
def __init__(self, timed=False):
super(WorldExt, self).__init__(timed)
self.resources: Dict = {}
def _process(self, *args, **kwargs):
for processor in self._processors:
processor.process(*args, **kwargs)
def add_processors(self, *processors: "Processor"):
for processor in processors:
self.add_processor(processor)
def add_resource(self, resource: T) -> None:
resource_type = getattr(resource, "resource_type", type(resource))
self.resources[resource_type] = resource
def get_resource(self, resource_type: Type[T]) -> T:
return self.resources[resource_type]
def try_resource(self, resource_type: Type[T]) -> Optional[T]:
return self.resources.get(resource_type, None)
def remove_resource(self, resource_type: T) -> None:
if resource_type in self.resources:
del self.resources[resource_type]
def add_component(self, entity: int, component_instance: Any) -> None:
component_type = getattr(
component_instance, "component_type", type(component_instance)
)
if component_type not in self._components:
self._components[component_type] = set()
self._components[component_type].add(entity)
if entity not in self._entities:
self._entities[entity] = {}
self._entities[entity][component_type] = component_instance
self.clear_cache()
def add_components(self, entity: int, *component_instances: Any) -> None:
for component in component_instances:
self.add_component(entity, component)
def try_component(self, entity: int, component_type: Type[T]) -> Optional[T]:
if component_type in self._entities[entity]:
return self._entities[entity][component_type]
else:
return None
class Processor(esper.Processor):
world: WorldExt
def process(self, *args, **kwargs) -> None:
raise NotImplementedError
def debug_world(world: esper.World, *with_components: Type[Any]) -> None:
data = []
for ent, _ in world.get_components(*with_components):
components = world.components_for_entity(ent)
data.append(dict({"ent": ent}, **{str(type(c)): c for c in components}))
print(tabulate.tabulate(data))
| true | true |
f71efc870b7a9b185f58cab460182c3571267535 | 1,288 | py | Python | savu/test/travis/plugin_tests/filter_tests/hilbert_test.py | malte-storm/Savu | 16291e8a22464c50c511af01fbc648860c1236e6 | [
"Apache-2.0"
] | 1 | 2021-04-18T09:30:54.000Z | 2021-04-18T09:30:54.000Z | savu/test/travis/plugin_tests/filter_tests/hilbert_test.py | reubenlindroos/Savu | 4bf93b818df41fc8c231b23f48393cb8fa00255d | [
"Apache-2.0"
] | null | null | null | savu/test/travis/plugin_tests/filter_tests/hilbert_test.py | reubenlindroos/Savu | 4bf93b818df41fc8c231b23f48393cb8fa00255d | [
"Apache-2.0"
] | 1 | 2021-05-20T16:31:29.000Z | 2021-05-20T16:31:29.000Z | # Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: hilbert_test
:platform: Unix
:synopsis: Tests the hilbert filter plugin
.. moduleauthor:: Tunhe Zhou <scientificsoftware@diamond.ac.uk>
"""
import unittest
from savu.test import test_utils as tu
from savu.test.travis.framework_tests.plugin_runner_test import \
run_protected_plugin_runner
class HilbertTest(unittest.TestCase):
    """Run the hilbert filter plugin against the 24737 test data set."""

    def test_hilbert(self):
        options = tu.set_options(
            tu.get_test_data_path('24737.nxs'),
            process_file=tu.get_test_process_path('hilbert_test.nxs'))
        run_protected_plugin_runner(options)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| 33.025641 | 78 | 0.726708 |
import unittest
from savu.test import test_utils as tu
from savu.test.travis.framework_tests.plugin_runner_test import \
run_protected_plugin_runner
class HilbertTest(unittest.TestCase):
def test_hilbert(self):
data_file = tu.get_test_data_path('24737.nxs')
process_file = tu.get_test_process_path('hilbert_test.nxs')
run_protected_plugin_runner(tu.set_options(data_file,
process_file=process_file))
if __name__ == "__main__":
unittest.main()
| true | true |
f71efcff616f3335a8fa825beb91566c27a20f16 | 35,204 | py | Python | tests/model_fields/test_jsonfield.py | kevinmarsh/django | 00a1d42bf0d83ba4b329271433eb5e3fd0f704fe | [
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | null | null | null | tests/model_fields/test_jsonfield.py | kevinmarsh/django | 00a1d42bf0d83ba4b329271433eb5e3fd0f704fe | [
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | null | null | null | tests/model_fields/test_jsonfield.py | kevinmarsh/django | 00a1d42bf0d83ba4b329271433eb5e3fd0f704fe | [
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | null | null | null | import operator
import uuid
from unittest import mock
from django import forms
from django.core import serializers
from django.core.exceptions import ValidationError
from django.core.serializers.json import DjangoJSONEncoder
from django.db import (
DataError, IntegrityError, NotSupportedError, OperationalError, connection,
models,
)
from django.db.models import (
Count, ExpressionWrapper, F, IntegerField, OuterRef, Q, Subquery,
Transform, Value,
)
from django.db.models.expressions import RawSQL
from django.db.models.fields.json import (
KeyTextTransform, KeyTransform, KeyTransformFactory,
KeyTransformTextLookupMixin,
)
from django.db.models.functions import Cast
from django.test import (
SimpleTestCase, TestCase, skipIfDBFeature, skipUnlessDBFeature,
)
from django.test.utils import CaptureQueriesContext
from .models import (
CustomJSONDecoder, JSONModel, NullableJSONModel, RelatedJSONModel,
)
@skipUnlessDBFeature('supports_json_field')
class JSONFieldTests(TestCase):
    """Database-backed behaviour of JSONField storage."""

    def test_invalid_value(self):
        non_serializable = {
            'uuid': uuid.UUID('d85e2076-b67c-4ee7-8c3a-2bf5a2cc2475'),
        }
        with self.assertRaisesMessage(TypeError, 'is not JSON serializable'):
            NullableJSONModel.objects.create(value=non_serializable)

    def test_custom_encoder_decoder(self):
        value = {'uuid': uuid.UUID('{d85e2076-b67c-4ee7-8c3a-2bf5a2cc2475}')}
        obj = NullableJSONModel(value_custom=value)
        obj.clean_fields()
        obj.save()
        obj.refresh_from_db()
        self.assertEqual(obj.value_custom, value)

    def test_db_check_constraints(self):
        invalid = '{@!invalid json value 123 $!@#'
        # Force the encoder to emit malformed JSON so the database-side
        # check constraint has to reject it.
        with mock.patch.object(DjangoJSONEncoder, 'encode', return_value=invalid):
            with self.assertRaises((IntegrityError, DataError, OperationalError)):
                NullableJSONModel.objects.create(value_custom=invalid)
class TestMethods(SimpleTestCase):
    """Unit tests for JSONField deconstruction and transform lookup."""
    def test_deconstruct(self):
        field = models.JSONField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, 'django.db.models.JSONField')
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})
    def test_deconstruct_custom_encoder_decoder(self):
        # Non-default encoder/decoder must survive deconstruction so
        # migrations can reconstruct the field.
        field = models.JSONField(encoder=DjangoJSONEncoder, decoder=CustomJSONDecoder)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(kwargs['encoder'], DjangoJSONEncoder)
        self.assertEqual(kwargs['decoder'], CustomJSONDecoder)
    def test_get_transforms(self):
        # A registered transform takes precedence over key transforms.
        @models.JSONField.register_lookup
        class MyTransform(Transform):
            lookup_name = 'my_transform'
        field = models.JSONField()
        transform = field.get_transform('my_transform')
        self.assertIs(transform, MyTransform)
        # After deregistering (and clearing the lookup cache), the same
        # name falls back to a KeyTransformFactory.
        models.JSONField._unregister_lookup(MyTransform)
        models.JSONField._clear_cached_lookups()
        transform = field.get_transform('my_transform')
        self.assertIsInstance(transform, KeyTransformFactory)
    def test_key_transform_text_lookup_mixin_non_key_transform(self):
        transform = Transform('test')
        msg = (
            'Transform should be an instance of KeyTransform in order to use '
            'this lookup.'
        )
        with self.assertRaisesMessage(TypeError, msg):
            KeyTransformTextLookupMixin(transform)
class TestValidation(SimpleTestCase):
    """Constructor argument checks and field-level clean() validation."""

    def test_invalid_encoder(self):
        with self.assertRaisesMessage(
            ValueError, 'The encoder parameter must be a callable object.'
        ):
            models.JSONField(encoder=DjangoJSONEncoder())

    def test_invalid_decoder(self):
        with self.assertRaisesMessage(
            ValueError, 'The decoder parameter must be a callable object.'
        ):
            models.JSONField(decoder=CustomJSONDecoder())

    def test_validation_error(self):
        value = uuid.UUID('{d85e2076-b67c-4ee7-8c3a-2bf5a2cc2475}')
        with self.assertRaisesMessage(ValidationError, 'Value must be valid JSON.'):
            models.JSONField().clean({'uuid': value}, None)

    def test_custom_encoder(self):
        # DjangoJSONEncoder can serialize UUIDs, so clean() must not raise.
        value = uuid.UUID('{d85e2076-b67c-4ee7-8c3a-2bf5a2cc2475}')
        models.JSONField(encoder=DjangoJSONEncoder).clean({'uuid': value}, None)
class TestFormField(SimpleTestCase):
    """Mapping of the model JSONField to its form-field counterpart."""

    def test_formfield(self):
        form_field = models.JSONField().formfield()
        self.assertIsInstance(form_field, forms.JSONField)

    def test_formfield_custom_encoder_decoder(self):
        # Custom encoder/decoder classes must propagate to the form field.
        model_field = models.JSONField(
            encoder=DjangoJSONEncoder,
            decoder=CustomJSONDecoder,
        )
        form_field = model_field.formfield()
        self.assertIs(form_field.encoder, DjangoJSONEncoder)
        self.assertIs(form_field.decoder, CustomJSONDecoder)
class TestSerialization(SimpleTestCase):
    """Round-trip JSONField values through the JSON and XML serializers."""

    test_data = (
        '[{"fields": {"value": %s}, '
        '"model": "model_fields.jsonmodel", "pk": null}]'
    )
    # Pairs of (Python value, its serialized JSON representation).
    test_values = (
        ({'a': 'b', 'c': None}, '{"a": "b", "c": null}'),
        ('abc', '"abc"'),
        ('{"a": "a"}', '"{\\"a\\": \\"a\\"}"'),
    )

    def test_dumping(self):
        for python_value, dumped in self.test_values:
            with self.subTest(value=python_value):
                serialized = serializers.serialize(
                    'json', [JSONModel(value=python_value)],
                )
                self.assertJSONEqual(serialized, self.test_data % dumped)

    def test_loading(self):
        for python_value, dumped in self.test_values:
            with self.subTest(value=python_value):
                deserialized = serializers.deserialize(
                    'json', self.test_data % dumped,
                )
                instance = list(deserialized)[0].object
                self.assertEqual(instance.value, python_value)

    def test_xml_serialization(self):
        test_xml_data = (
            '<django-objects version="1.0">'
            '<object model="model_fields.nullablejsonmodel">'
            '<field name="value" type="JSONField">%s'
            '</field></object></django-objects>'
        )
        for python_value, dumped in self.test_values:
            with self.subTest(value=python_value):
                instance = NullableJSONModel(value=python_value)
                xml = serializers.serialize('xml', [instance], fields=['value'])
                self.assertXMLEqual(xml, test_xml_data % dumped)
                # Deserializing the output must reproduce the original value.
                roundtripped = list(serializers.deserialize('xml', xml))[0].object
                self.assertEqual(roundtripped.value, instance.value)
@skipUnlessDBFeature('supports_json_field')
class TestSaveLoad(TestCase):
    """Round-trip values through a JSONField against a real database."""

    def test_null(self):
        # SQL NULL round-trips as Python None.
        obj = NullableJSONModel(value=None)
        obj.save()
        obj.refresh_from_db()
        self.assertIsNone(obj.value)

    @skipUnlessDBFeature('supports_primitives_in_json_field')
    def test_json_null_different_from_sql_null(self):
        # Value('null') stores the JSON null scalar; value=None stores SQL NULL.
        json_null = NullableJSONModel.objects.create(value=Value('null'))
        json_null.refresh_from_db()
        sql_null = NullableJSONModel.objects.create(value=None)
        sql_null.refresh_from_db()
        # 'null' is not equal to NULL in the database.
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value=Value('null')),
            [json_null],
        )
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value=None),
            [json_null],
        )
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value__isnull=True),
            [sql_null],
        )
        # 'null' is equal to NULL in Python (None).
        self.assertEqual(json_null.value, sql_null.value)

    @skipUnlessDBFeature('supports_primitives_in_json_field')
    def test_primitives(self):
        # Top-level (non-object) JSON scalars round-trip unchanged.
        values = [
            True,
            1,
            1.45,
            'String',
            '',
        ]
        for value in values:
            with self.subTest(value=value):
                obj = JSONModel(value=value)
                obj.save()
                obj.refresh_from_db()
                self.assertEqual(obj.value, value)

    def test_dict(self):
        # JSON objects, including nested ones, round-trip unchanged.
        values = [
            {},
            {'name': 'John', 'age': 20, 'height': 180.3},
            {'a': True, 'b': {'b1': False, 'b2': None}},
        ]
        for value in values:
            with self.subTest(value=value):
                obj = JSONModel.objects.create(value=value)
                obj.refresh_from_db()
                self.assertEqual(obj.value, value)

    def test_list(self):
        # JSON arrays, including nested ones, round-trip unchanged.
        values = [
            [],
            ['John', 20, 180.3],
            [True, [False, None]],
        ]
        for value in values:
            with self.subTest(value=value):
                obj = JSONModel.objects.create(value=value)
                obj.refresh_from_db()
                self.assertEqual(obj.value, value)

    def test_realistic_object(self):
        # A realistic mixed structure (objects and arrays) round-trips intact.
        value = {
            'name': 'John',
            'age': 20,
            'pets': [
                {'name': 'Kit', 'type': 'cat', 'age': 2},
                {'name': 'Max', 'type': 'dog', 'age': 1},
            ],
            'courses': [
                ['A1', 'A2', 'A3'],
                ['B1', 'B2'],
                ['C1'],
            ],
        }
        obj = JSONModel.objects.create(value=value)
        obj.refresh_from_db()
        self.assertEqual(obj.value, value)
@skipUnlessDBFeature('supports_json_field')
class TestQuerying(TestCase):
    @classmethod
    def setUpTestData(cls):
        """Create the shared fixture rows.

        Tests below reference ``cls.objs`` by index, so the order of
        ``values`` is significant and must not change.
        """
        cls.primitives = [True, False, 'yes', 7, 9.6]
        values = [
            None,  # objs[0]: SQL NULL
            [],  # objs[1]: empty array
            {},  # objs[2]: empty object
            {'a': 'b', 'c': 14},  # objs[3]
            {  # objs[4]: one key for every JSON value type
                'a': 'b',
                'c': 14,
                'd': ['e', {'f': 'g'}],
                'h': True,
                'i': False,
                'j': None,
                'k': {'l': 'm'},
                'n': [None],
                'o': '"quoted"',
                'p': 4.2,
            },
            [1, [2]],  # objs[5]: nested array
            {'k': True, 'l': False, 'foo': 'bax'},  # objs[6]
            {  # objs[7]: nested objects and arrays
                'foo': 'bar',
                'baz': {'a': 'b', 'c': 'd'},
                'bar': ['foo', 'bar'],
                'bax': {'foo': 'bar'},
            },
        ]
        cls.objs = [
            NullableJSONModel.objects.create(value=value)
            for value in values
        ]
        # objs[8:] hold top-level scalars, only on backends that support them.
        if connection.features.supports_primitives_in_json_field:
            cls.objs.extend([
                NullableJSONModel.objects.create(value=value)
                for value in cls.primitives
            ])
        # Raw placeholder for RawSQL-based tests; PostgreSQL needs an explicit
        # cast to jsonb.
        cls.raw_sql = '%s::jsonb' if connection.vendor == 'postgresql' else '%s'
def test_exact(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__exact={}),
[self.objs[2]],
)
def test_exact_complex(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__exact={'a': 'b', 'c': 14}),
[self.objs[3]],
)
def test_isnull(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__isnull=True),
[self.objs[0]],
)
    def test_ordering_by_transform(self):
        # Ordering by a key transform ('<field>__ord') sorts on the extracted
        # JSON values. Checked for both the plain field and the one with a
        # custom encoder/decoder.
        mariadb = connection.vendor == 'mysql' and connection.mysql_is_mariadb
        values = [
            {'ord': 93, 'name': 'bar'},
            {'ord': 22.1, 'name': 'foo'},
            {'ord': -1, 'name': 'baz'},
            {'ord': 21.931902, 'name': 'spam'},
            {'ord': -100291029, 'name': 'eggs'},
        ]
        for field_name in ['value', 'value_custom']:
            with self.subTest(field=field_name):
                objs = [
                    NullableJSONModel.objects.create(**{field_name: value})
                    for value in values
                ]
                query = NullableJSONModel.objects.filter(
                    **{'%s__name__isnull' % field_name: False},
                ).order_by('%s__ord' % field_name)
                # Numeric ordering on backends that preserve JSON types.
                expected = [objs[4], objs[2], objs[3], objs[1], objs[0]]
                if mariadb or connection.vendor == 'oracle':
                    # MariaDB and Oracle return JSON values as strings.
                    expected = [objs[2], objs[4], objs[3], objs[1], objs[0]]
                self.assertSequenceEqual(query, expected)
    def test_ordering_grouping_by_key_transform(self):
        # Ordering by a nested index ('value__d__0') and by an equivalent
        # explicit KeyTransform annotation must behave identically.
        base_qs = NullableJSONModel.objects.filter(value__d__0__isnull=False)
        for qs in (
            base_qs.order_by('value__d__0'),
            base_qs.annotate(key=KeyTransform('0', KeyTransform('d', 'value'))).order_by('key'),
        ):
            self.assertSequenceEqual(qs, [self.objs[4]])
        qs = NullableJSONModel.objects.filter(value__isnull=False)
        # Grouping by a text key transform: rows lacking the nested key group
        # under None with a count of 0.
        self.assertQuerysetEqual(
            qs.filter(value__isnull=False).annotate(
                key=KeyTextTransform('f', KeyTransform('1', KeyTransform('d', 'value'))),
            ).values('key').annotate(count=Count('key')).order_by('count'),
            [(None, 0), ('g', 1)],
            operator.itemgetter('key', 'count'),
        )
def test_ordering_grouping_by_count(self):
qs = NullableJSONModel.objects.filter(
value__isnull=False,
).values('value__d__0').annotate(count=Count('value__d__0')).order_by('count')
self.assertQuerysetEqual(qs, [0, 1], operator.itemgetter('count'))
def test_order_grouping_custom_decoder(self):
NullableJSONModel.objects.create(value_custom={'a': 'b'})
qs = NullableJSONModel.objects.filter(value_custom__isnull=False)
self.assertSequenceEqual(
qs.values(
'value_custom__a',
).annotate(
count=Count('id'),
).order_by('value_custom__a'),
[{'value_custom__a': 'b', 'count': 1}],
)
def test_key_transform_raw_expression(self):
expr = RawSQL(self.raw_sql, ['{"x": "bar"}'])
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__foo=KeyTransform('x', expr)),
[self.objs[7]],
)
def test_nested_key_transform_raw_expression(self):
expr = RawSQL(self.raw_sql, ['{"x": {"y": "bar"}}'])
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__foo=KeyTransform('y', KeyTransform('x', expr))),
[self.objs[7]],
)
def test_key_transform_expression(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__d__0__isnull=False).annotate(
key=KeyTransform('d', 'value'),
chain=KeyTransform('0', 'key'),
expr=KeyTransform('0', Cast('key', models.JSONField())),
).filter(chain=F('expr')),
[self.objs[4]],
)
def test_key_transform_annotation_expression(self):
obj = NullableJSONModel.objects.create(value={'d': ['e', 'e']})
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__d__0__isnull=False).annotate(
key=F('value__d'),
chain=F('key__0'),
expr=Cast('key', models.JSONField()),
).filter(chain=F('expr__1')),
[obj],
)
def test_nested_key_transform_expression(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__d__0__isnull=False).annotate(
key=KeyTransform('d', 'value'),
chain=KeyTransform('f', KeyTransform('1', 'key')),
expr=KeyTransform('f', KeyTransform('1', Cast('key', models.JSONField()))),
).filter(chain=F('expr')),
[self.objs[4]],
)
def test_nested_key_transform_annotation_expression(self):
obj = NullableJSONModel.objects.create(
value={'d': ['e', {'f': 'g'}, {'f': 'g'}]},
)
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__d__0__isnull=False).annotate(
key=F('value__d'),
chain=F('key__1__f'),
expr=Cast('key', models.JSONField()),
).filter(chain=F('expr__2__f')),
[obj],
)
def test_nested_key_transform_on_subquery(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__d__0__isnull=False).annotate(
subquery_value=Subquery(
NullableJSONModel.objects.filter(pk=OuterRef('pk')).values('value')
),
key=KeyTransform('d', 'subquery_value'),
chain=KeyTransform('f', KeyTransform('1', 'key')),
).filter(chain='g'),
[self.objs[4]],
)
def test_expression_wrapper_key_transform(self):
self.assertSequenceEqual(
NullableJSONModel.objects.annotate(
expr=ExpressionWrapper(
KeyTransform('c', 'value'),
output_field=IntegerField(),
),
).filter(expr__isnull=False),
self.objs[3:5],
)
def test_has_key(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__has_key='a'),
[self.objs[3], self.objs[4]],
)
def test_has_key_null_value(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__has_key='j'),
[self.objs[4]],
)
def test_has_key_deep(self):
tests = [
(Q(value__baz__has_key='a'), self.objs[7]),
(Q(value__has_key=KeyTransform('a', KeyTransform('baz', 'value'))), self.objs[7]),
(Q(value__has_key=F('value__baz__a')), self.objs[7]),
(Q(value__has_key=KeyTransform('c', KeyTransform('baz', 'value'))), self.objs[7]),
(Q(value__has_key=F('value__baz__c')), self.objs[7]),
(Q(value__d__1__has_key='f'), self.objs[4]),
(
Q(value__has_key=KeyTransform('f', KeyTransform('1', KeyTransform('d', 'value')))),
self.objs[4],
),
(Q(value__has_key=F('value__d__1__f')), self.objs[4]),
]
for condition, expected in tests:
with self.subTest(condition=condition):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(condition),
[expected],
)
def test_has_key_list(self):
obj = NullableJSONModel.objects.create(value=[{'a': 1}, {'b': 'x'}])
tests = [
Q(value__1__has_key='b'),
Q(value__has_key=KeyTransform('b', KeyTransform(1, 'value'))),
Q(value__has_key=KeyTransform('b', KeyTransform('1', 'value'))),
Q(value__has_key=F('value__1__b')),
]
for condition in tests:
with self.subTest(condition=condition):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(condition),
[obj],
)
def test_has_keys(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__has_keys=['a', 'c', 'h']),
[self.objs[4]],
)
def test_has_any_keys(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__has_any_keys=['c', 'l']),
[self.objs[3], self.objs[4], self.objs[6]],
)
    @skipUnlessDBFeature('supports_json_field_contains')
    def test_contains(self):
        # __contains matches when the stored document is a superset of the
        # given value, including partial matches inside nested structures.
        tests = [
            ({}, self.objs[2:5] + self.objs[6:8]),  # every object contains {}
            ({'baz': {'a': 'b', 'c': 'd'}}, [self.objs[7]]),
            ({'baz': {'a': 'b'}}, [self.objs[7]]),
            ({'baz': {'c': 'd'}}, [self.objs[7]]),
            ({'k': True, 'l': False}, [self.objs[6]]),
            ({'d': ['e', {'f': 'g'}]}, [self.objs[4]]),
            ({'d': ['e']}, [self.objs[4]]),  # array containment is per-element
            ({'d': [{'f': 'g'}]}, [self.objs[4]]),
            ([1, [2]], [self.objs[5]]),
            ([1], [self.objs[5]]),
            ([[2]], [self.objs[5]]),
            ({'n': [None]}, [self.objs[4]]),
            ({'j': None}, [self.objs[4]]),  # JSON null values participate too
        ]
        for value, expected in tests:
            with self.subTest(value=value):
                qs = NullableJSONModel.objects.filter(value__contains=value)
                self.assertSequenceEqual(qs, expected)
@skipIfDBFeature('supports_json_field_contains')
def test_contains_unsupported(self):
msg = 'contains lookup is not supported on this database backend.'
with self.assertRaisesMessage(NotSupportedError, msg):
NullableJSONModel.objects.filter(
value__contains={'baz': {'a': 'b', 'c': 'd'}},
).get()
@skipUnlessDBFeature(
'supports_primitives_in_json_field',
'supports_json_field_contains',
)
def test_contains_primitives(self):
for value in self.primitives:
with self.subTest(value=value):
qs = NullableJSONModel.objects.filter(value__contains=value)
self.assertIs(qs.exists(), True)
@skipUnlessDBFeature('supports_json_field_contains')
def test_contained_by(self):
qs = NullableJSONModel.objects.filter(value__contained_by={'a': 'b', 'c': 14, 'h': True})
self.assertSequenceEqual(qs, self.objs[2:4])
@skipIfDBFeature('supports_json_field_contains')
def test_contained_by_unsupported(self):
msg = 'contained_by lookup is not supported on this database backend.'
with self.assertRaisesMessage(NotSupportedError, msg):
NullableJSONModel.objects.filter(value__contained_by={'a': 'b'}).get()
def test_deep_values(self):
qs = NullableJSONModel.objects.values_list('value__k__l')
expected_objs = [(None,)] * len(self.objs)
expected_objs[4] = ('m',)
self.assertSequenceEqual(qs, expected_objs)
@skipUnlessDBFeature('can_distinct_on_fields')
def test_deep_distinct(self):
query = NullableJSONModel.objects.distinct('value__k__l').values_list('value__k__l')
self.assertSequenceEqual(query, [('m',), (None,)])
def test_isnull_key(self):
# key__isnull=False works the same as has_key='key'.
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__a__isnull=True),
self.objs[:3] + self.objs[5:],
)
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__a__isnull=False),
[self.objs[3], self.objs[4]],
)
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__j__isnull=False),
[self.objs[4]],
)
def test_isnull_key_or_none(self):
obj = NullableJSONModel.objects.create(value={'a': None})
self.assertSequenceEqual(
NullableJSONModel.objects.filter(Q(value__a__isnull=True) | Q(value__a=None)),
self.objs[:3] + self.objs[5:] + [obj],
)
def test_none_key(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__j=None),
[self.objs[4]],
)
def test_none_key_exclude(self):
obj = NullableJSONModel.objects.create(value={'j': 1})
if connection.vendor == 'oracle':
# Oracle supports filtering JSON objects with NULL keys, but the
# current implementation doesn't support it.
self.assertSequenceEqual(
NullableJSONModel.objects.exclude(value__j=None),
self.objs[1:4] + self.objs[5:] + [obj],
)
else:
self.assertSequenceEqual(NullableJSONModel.objects.exclude(value__j=None), [obj])
def test_shallow_list_lookup(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__0=1),
[self.objs[5]],
)
def test_shallow_obj_lookup(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__a='b'),
[self.objs[3], self.objs[4]],
)
def test_obj_subquery_lookup(self):
qs = NullableJSONModel.objects.annotate(
field=Subquery(NullableJSONModel.objects.filter(pk=OuterRef('pk')).values('value')),
).filter(field__a='b')
self.assertSequenceEqual(qs, [self.objs[3], self.objs[4]])
def test_deep_lookup_objs(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__k__l='m'),
[self.objs[4]],
)
def test_shallow_lookup_obj_target(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__k={'l': 'm'}),
[self.objs[4]],
)
def test_deep_lookup_array(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__1__0=2),
[self.objs[5]],
)
def test_deep_lookup_mixed(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__d__1__f='g'),
[self.objs[4]],
)
def test_deep_lookup_transform(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__c__gt=2),
[self.objs[3], self.objs[4]],
)
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__c__gt=2.33),
[self.objs[3], self.objs[4]],
)
self.assertIs(NullableJSONModel.objects.filter(value__c__lt=5).exists(), False)
def test_lookup_exclude(self):
tests = [
(Q(value__a='b'), [self.objs[0]]),
(Q(value__foo='bax'), [self.objs[0], self.objs[7]]),
]
for condition, expected in tests:
self.assertSequenceEqual(
NullableJSONModel.objects.exclude(condition),
expected,
)
self.assertSequenceEqual(
NullableJSONModel.objects.filter(~condition),
expected,
)
def test_lookup_exclude_nonexistent_key(self):
# Values without the key are ignored.
condition = Q(value__foo='bax')
objs_with_value = [self.objs[6]]
objs_with_different_value = [self.objs[0], self.objs[7]]
self.assertSequenceEqual(
NullableJSONModel.objects.exclude(condition),
objs_with_different_value,
)
self.assertSequenceEqual(
NullableJSONModel.objects.exclude(~condition),
objs_with_value,
)
self.assertCountEqual(
NullableJSONModel.objects.filter(condition | ~condition),
objs_with_value + objs_with_different_value,
)
self.assertCountEqual(
NullableJSONModel.objects.exclude(condition & ~condition),
objs_with_value + objs_with_different_value,
)
# Add the __isnull lookup to get an exhaustive set.
self.assertSequenceEqual(
NullableJSONModel.objects.exclude(condition & Q(value__foo__isnull=False)),
self.objs[0:6] + self.objs[7:],
)
self.assertSequenceEqual(
NullableJSONModel.objects.filter(condition & Q(value__foo__isnull=False)),
objs_with_value,
)
def test_usage_in_subquery(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(
id__in=NullableJSONModel.objects.filter(value__c=14),
),
self.objs[3:5],
)
@skipUnlessDBFeature('supports_json_field_contains')
def test_array_key_contains(self):
tests = [
([], [self.objs[7]]),
('bar', [self.objs[7]]),
(['bar'], [self.objs[7]]),
('ar', []),
]
for value, expected in tests:
with self.subTest(value=value):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__bar__contains=value),
expected,
)
def test_key_iexact(self):
self.assertIs(NullableJSONModel.objects.filter(value__foo__iexact='BaR').exists(), True)
self.assertIs(NullableJSONModel.objects.filter(value__foo__iexact='"BaR"').exists(), False)
def test_key_in(self):
tests = [
('value__c__in', [14], self.objs[3:5]),
('value__c__in', [14, 15], self.objs[3:5]),
('value__0__in', [1], [self.objs[5]]),
('value__0__in', [1, 3], [self.objs[5]]),
('value__foo__in', ['bar'], [self.objs[7]]),
(
'value__foo__in',
[KeyTransform('foo', KeyTransform('bax', 'value'))],
[self.objs[7]],
),
('value__foo__in', [F('value__bax__foo')], [self.objs[7]]),
(
'value__foo__in',
[KeyTransform('foo', KeyTransform('bax', 'value')), 'baz'],
[self.objs[7]],
),
('value__foo__in', [F('value__bax__foo'), 'baz'], [self.objs[7]]),
('value__foo__in', ['bar', 'baz'], [self.objs[7]]),
('value__bar__in', [['foo', 'bar']], [self.objs[7]]),
('value__bar__in', [['foo', 'bar'], ['a']], [self.objs[7]]),
('value__bax__in', [{'foo': 'bar'}, {'a': 'b'}], [self.objs[7]]),
]
for lookup, value, expected in tests:
with self.subTest(lookup=lookup, value=value):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(**{lookup: value}),
expected,
)
def test_key_values(self):
qs = NullableJSONModel.objects.filter(value__h=True)
tests = [
('value__a', 'b'),
('value__c', 14),
('value__d', ['e', {'f': 'g'}]),
('value__h', True),
('value__i', False),
('value__j', None),
('value__k', {'l': 'm'}),
('value__n', [None]),
('value__p', 4.2),
]
for lookup, expected in tests:
with self.subTest(lookup=lookup):
self.assertEqual(qs.values_list(lookup, flat=True).get(), expected)
@skipUnlessDBFeature('supports_json_field_contains')
def test_key_contains(self):
self.assertIs(NullableJSONModel.objects.filter(value__foo__contains='ar').exists(), False)
self.assertIs(NullableJSONModel.objects.filter(value__foo__contains='bar').exists(), True)
def test_key_icontains(self):
self.assertIs(NullableJSONModel.objects.filter(value__foo__icontains='Ar').exists(), True)
def test_key_startswith(self):
self.assertIs(NullableJSONModel.objects.filter(value__foo__startswith='b').exists(), True)
def test_key_istartswith(self):
self.assertIs(NullableJSONModel.objects.filter(value__foo__istartswith='B').exists(), True)
def test_key_endswith(self):
self.assertIs(NullableJSONModel.objects.filter(value__foo__endswith='r').exists(), True)
def test_key_iendswith(self):
self.assertIs(NullableJSONModel.objects.filter(value__foo__iendswith='R').exists(), True)
def test_key_regex(self):
self.assertIs(NullableJSONModel.objects.filter(value__foo__regex=r'^bar$').exists(), True)
def test_key_iregex(self):
self.assertIs(NullableJSONModel.objects.filter(value__foo__iregex=r'^bAr$').exists(), True)
def test_key_quoted_string(self):
self.assertEqual(
NullableJSONModel.objects.filter(value__o='"quoted"').get(),
self.objs[4],
)
    @skipUnlessDBFeature('has_json_operators')
    def test_key_sql_injection(self):
        # A hostile key name must end up quoted inside the generated SQL, not
        # spliced in as raw SQL (where the OR 1 = 1 would match every row).
        with CaptureQueriesContext(connection) as queries:
            self.assertIs(
                NullableJSONModel.objects.filter(**{
                    """value__test' = '"a"') OR 1 = 1 OR ('d""": 'x',
                }).exists(),
                False,
            )
        # The single quotes of the attempted injection are escaped (doubled)
        # in the captured SQL.
        self.assertIn(
            """."value" -> 'test'' = ''"a"'') OR 1 = 1 OR (''d') = '"x"' """,
            queries[0]['sql'],
        )
@skipIfDBFeature('has_json_operators')
def test_key_sql_injection_escape(self):
query = str(JSONModel.objects.filter(**{
"""value__test") = '"a"' OR 1 = 1 OR ("d""": 'x',
}).query)
self.assertIn('"test\\"', query)
self.assertIn('\\"d', query)
def test_key_escape(self):
obj = NullableJSONModel.objects.create(value={'%total': 10})
self.assertEqual(NullableJSONModel.objects.filter(**{'value__%total': 10}).get(), obj)
def test_none_key_and_exact_lookup(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__a='b', value__j=None),
[self.objs[4]],
)
def test_lookups_with_key_transform(self):
tests = (
('value__baz__has_key', 'c'),
('value__baz__has_keys', ['a', 'c']),
('value__baz__has_any_keys', ['a', 'x']),
('value__has_key', KeyTextTransform('foo', 'value')),
)
for lookup, value in tests:
with self.subTest(lookup=lookup):
self.assertIs(NullableJSONModel.objects.filter(
**{lookup: value},
).exists(), True)
@skipUnlessDBFeature('supports_json_field_contains')
def test_contains_contained_by_with_key_transform(self):
tests = [
('value__d__contains', 'e'),
('value__d__contains', [{'f': 'g'}]),
('value__contains', KeyTransform('bax', 'value')),
('value__contains', F('value__bax')),
('value__baz__contains', {'a': 'b'}),
('value__baz__contained_by', {'a': 'b', 'c': 'd', 'e': 'f'}),
(
'value__contained_by',
KeyTransform('x', RawSQL(
self.raw_sql,
['{"x": {"a": "b", "c": 1, "d": "e"}}'],
)),
),
]
# For databases where {'f': 'g'} (without surrounding []) matches
# [{'f': 'g'}].
if not connection.features.json_key_contains_list_matching_requires_list:
tests.append(('value__d__contains', {'f': 'g'}))
for lookup, value in tests:
with self.subTest(lookup=lookup, value=value):
self.assertIs(NullableJSONModel.objects.filter(
**{lookup: value},
).exists(), True)
    def test_join_key_transform_annotation_expression(self):
        # Key transform annotations that traverse a foreign key
        # ('json_model__value__d') resolve against the joined table.
        related_obj = RelatedJSONModel.objects.create(
            value={'d': ['f', 'e']},
            json_model=self.objs[4],
        )
        RelatedJSONModel.objects.create(
            value={'d': ['e', 'f']},
            json_model=self.objs[4],
        )
        # Only related_obj has value__d__1 == json_model.value__d__0 ('e').
        self.assertSequenceEqual(
            RelatedJSONModel.objects.annotate(
                key=F('value__d'),
                related_key=F('json_model__value__d'),
                chain=F('key__1'),
                expr=Cast('key', models.JSONField()),
            ).filter(chain=F('related_key__0')),
            [related_obj],
        )
| 38.306855 | 100 | 0.572293 | import operator
import uuid
from unittest import mock
from django import forms
from django.core import serializers
from django.core.exceptions import ValidationError
from django.core.serializers.json import DjangoJSONEncoder
from django.db import (
DataError, IntegrityError, NotSupportedError, OperationalError, connection,
models,
)
from django.db.models import (
Count, ExpressionWrapper, F, IntegerField, OuterRef, Q, Subquery,
Transform, Value,
)
from django.db.models.expressions import RawSQL
from django.db.models.fields.json import (
KeyTextTransform, KeyTransform, KeyTransformFactory,
KeyTransformTextLookupMixin,
)
from django.db.models.functions import Cast
from django.test import (
SimpleTestCase, TestCase, skipIfDBFeature, skipUnlessDBFeature,
)
from django.test.utils import CaptureQueriesContext
from .models import (
CustomJSONDecoder, JSONModel, NullableJSONModel, RelatedJSONModel,
)
@skipUnlessDBFeature('supports_json_field')
class JSONFieldTests(TestCase):
def test_invalid_value(self):
msg = 'is not JSON serializable'
with self.assertRaisesMessage(TypeError, msg):
NullableJSONModel.objects.create(value={
'uuid': uuid.UUID('d85e2076-b67c-4ee7-8c3a-2bf5a2cc2475'),
})
def test_custom_encoder_decoder(self):
value = {'uuid': uuid.UUID('{d85e2076-b67c-4ee7-8c3a-2bf5a2cc2475}')}
obj = NullableJSONModel(value_custom=value)
obj.clean_fields()
obj.save()
obj.refresh_from_db()
self.assertEqual(obj.value_custom, value)
def test_db_check_constraints(self):
value = '{@!invalid json value 123 $!@#'
with mock.patch.object(DjangoJSONEncoder, 'encode', return_value=value):
with self.assertRaises((IntegrityError, DataError, OperationalError)):
NullableJSONModel.objects.create(value_custom=value)
class TestMethods(SimpleTestCase):
def test_deconstruct(self):
field = models.JSONField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, 'django.db.models.JSONField')
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_deconstruct_custom_encoder_decoder(self):
field = models.JSONField(encoder=DjangoJSONEncoder, decoder=CustomJSONDecoder)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(kwargs['encoder'], DjangoJSONEncoder)
self.assertEqual(kwargs['decoder'], CustomJSONDecoder)
def test_get_transforms(self):
@models.JSONField.register_lookup
class MyTransform(Transform):
lookup_name = 'my_transform'
field = models.JSONField()
transform = field.get_transform('my_transform')
self.assertIs(transform, MyTransform)
models.JSONField._unregister_lookup(MyTransform)
models.JSONField._clear_cached_lookups()
transform = field.get_transform('my_transform')
self.assertIsInstance(transform, KeyTransformFactory)
def test_key_transform_text_lookup_mixin_non_key_transform(self):
transform = Transform('test')
msg = (
'Transform should be an instance of KeyTransform in order to use '
'this lookup.'
)
with self.assertRaisesMessage(TypeError, msg):
KeyTransformTextLookupMixin(transform)
class TestValidation(SimpleTestCase):
def test_invalid_encoder(self):
msg = 'The encoder parameter must be a callable object.'
with self.assertRaisesMessage(ValueError, msg):
models.JSONField(encoder=DjangoJSONEncoder())
def test_invalid_decoder(self):
msg = 'The decoder parameter must be a callable object.'
with self.assertRaisesMessage(ValueError, msg):
models.JSONField(decoder=CustomJSONDecoder())
def test_validation_error(self):
field = models.JSONField()
msg = 'Value must be valid JSON.'
value = uuid.UUID('{d85e2076-b67c-4ee7-8c3a-2bf5a2cc2475}')
with self.assertRaisesMessage(ValidationError, msg):
field.clean({'uuid': value}, None)
def test_custom_encoder(self):
field = models.JSONField(encoder=DjangoJSONEncoder)
value = uuid.UUID('{d85e2076-b67c-4ee7-8c3a-2bf5a2cc2475}')
field.clean({'uuid': value}, None)
class TestFormField(SimpleTestCase):
def test_formfield(self):
model_field = models.JSONField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, forms.JSONField)
def test_formfield_custom_encoder_decoder(self):
model_field = models.JSONField(encoder=DjangoJSONEncoder, decoder=CustomJSONDecoder)
form_field = model_field.formfield()
self.assertIs(form_field.encoder, DjangoJSONEncoder)
self.assertIs(form_field.decoder, CustomJSONDecoder)
class TestSerialization(SimpleTestCase):
test_data = (
'[{"fields": {"value": %s}, '
'"model": "model_fields.jsonmodel", "pk": null}]'
)
test_values = (
({'a': 'b', 'c': None}, '{"a": "b", "c": null}'),
('abc', '"abc"'),
('{"a": "a"}', '"{\\"a\\": \\"a\\"}"'),
)
def test_dumping(self):
for value, serialized in self.test_values:
with self.subTest(value=value):
instance = JSONModel(value=value)
data = serializers.serialize('json', [instance])
self.assertJSONEqual(data, self.test_data % serialized)
def test_loading(self):
for value, serialized in self.test_values:
with self.subTest(value=value):
instance = list(
serializers.deserialize('json', self.test_data % serialized)
)[0].object
self.assertEqual(instance.value, value)
def test_xml_serialization(self):
test_xml_data = (
'<django-objects version="1.0">'
'<object model="model_fields.nullablejsonmodel">'
'<field name="value" type="JSONField">%s'
'</field></object></django-objects>'
)
for value, serialized in self.test_values:
with self.subTest(value=value):
instance = NullableJSONModel(value=value)
data = serializers.serialize('xml', [instance], fields=['value'])
self.assertXMLEqual(data, test_xml_data % serialized)
new_instance = list(serializers.deserialize('xml', data))[0].object
self.assertEqual(new_instance.value, instance.value)
@skipUnlessDBFeature('supports_json_field')
class TestSaveLoad(TestCase):
def test_null(self):
obj = NullableJSONModel(value=None)
obj.save()
obj.refresh_from_db()
self.assertIsNone(obj.value)
@skipUnlessDBFeature('supports_primitives_in_json_field')
def test_json_null_different_from_sql_null(self):
json_null = NullableJSONModel.objects.create(value=Value('null'))
json_null.refresh_from_db()
sql_null = NullableJSONModel.objects.create(value=None)
sql_null.refresh_from_db()
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value=Value('null')),
[json_null],
)
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value=None),
[json_null],
)
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__isnull=True),
[sql_null],
)
self.assertEqual(json_null.value, sql_null.value)
@skipUnlessDBFeature('supports_primitives_in_json_field')
def test_primitives(self):
values = [
True,
1,
1.45,
'String',
'',
]
for value in values:
with self.subTest(value=value):
obj = JSONModel(value=value)
obj.save()
obj.refresh_from_db()
self.assertEqual(obj.value, value)
def test_dict(self):
values = [
{},
{'name': 'John', 'age': 20, 'height': 180.3},
{'a': True, 'b': {'b1': False, 'b2': None}},
]
for value in values:
with self.subTest(value=value):
obj = JSONModel.objects.create(value=value)
obj.refresh_from_db()
self.assertEqual(obj.value, value)
def test_list(self):
values = [
[],
['John', 20, 180.3],
[True, [False, None]],
]
for value in values:
with self.subTest(value=value):
obj = JSONModel.objects.create(value=value)
obj.refresh_from_db()
self.assertEqual(obj.value, value)
def test_realistic_object(self):
value = {
'name': 'John',
'age': 20,
'pets': [
{'name': 'Kit', 'type': 'cat', 'age': 2},
{'name': 'Max', 'type': 'dog', 'age': 1},
],
'courses': [
['A1', 'A2', 'A3'],
['B1', 'B2'],
['C1'],
],
}
obj = JSONModel.objects.create(value=value)
obj.refresh_from_db()
self.assertEqual(obj.value, value)
@skipUnlessDBFeature('supports_json_field')
class TestQuerying(TestCase):
    """Lookup, transform, and containment behavior of JSONField.

    Expected results throughout this class are expressed as indices into
    ``cls.objs``, whose contents are fixed by the creation order in
    ``setUpTestData`` — see the index map there before editing.
    """
    @classmethod
    def setUpTestData(cls):
        cls.primitives = [True, False, 'yes', 7, 9.6]
        # Index map for cls.objs (creation order below):
        #   0: None          1: []            2: {}
        #   3: small dict    4: big dict with nested/edge-case values
        #   5: [1, [2]]      6: {'k', 'l', 'foo'} dict
        #   7: nested foo/baz/bar/bax dict
        #   8+: the primitives above (only on backends that support them).
        values = [
            None,
            [],
            {},
            {'a': 'b', 'c': 14},
            {
                'a': 'b',
                'c': 14,
                'd': ['e', {'f': 'g'}],
                'h': True,
                'i': False,
                'j': None,
                'k': {'l': 'm'},
                'n': [None],
                'o': '"quoted"',
                'p': 4.2,
            },
            [1, [2]],
            {'k': True, 'l': False, 'foo': 'bax'},
            {
                'foo': 'bar',
                'baz': {'a': 'b', 'c': 'd'},
                'bar': ['foo', 'bar'],
                'bax': {'foo': 'bar'},
            },
        ]
        cls.objs = [
            NullableJSONModel.objects.create(value=value)
            for value in values
        ]
        if connection.features.supports_primitives_in_json_field:
            cls.objs.extend([
                NullableJSONModel.objects.create(value=value)
                for value in cls.primitives
            ])
        # PostgreSQL needs an explicit cast for raw JSON parameters.
        cls.raw_sql = '%s::jsonb' if connection.vendor == 'postgresql' else '%s'
    def test_exact(self):
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value__exact={}),
            [self.objs[2]],
        )
    def test_exact_complex(self):
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value__exact={'a': 'b', 'c': 14}),
            [self.objs[3]],
        )
    def test_isnull(self):
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value__isnull=True),
            [self.objs[0]],
        )
    def test_ordering_by_transform(self):
        # MariaDB and Oracle sort JSON numbers differently from the other
        # backends, hence the alternate expected ordering below.
        mariadb = connection.vendor == 'mysql' and connection.mysql_is_mariadb
        values = [
            {'ord': 93, 'name': 'bar'},
            {'ord': 22.1, 'name': 'foo'},
            {'ord': -1, 'name': 'baz'},
            {'ord': 21.931902, 'name': 'spam'},
            {'ord': -100291029, 'name': 'eggs'},
        ]
        for field_name in ['value', 'value_custom']:
            with self.subTest(field=field_name):
                objs = [
                    NullableJSONModel.objects.create(**{field_name: value})
                    for value in values
                ]
                query = NullableJSONModel.objects.filter(
                    **{'%s__name__isnull' % field_name: False},
                ).order_by('%s__ord' % field_name)
                expected = [objs[4], objs[2], objs[3], objs[1], objs[0]]
                if mariadb or connection.vendor == 'oracle':
                    expected = [objs[2], objs[4], objs[3], objs[1], objs[0]]
                self.assertSequenceEqual(query, expected)
    def test_ordering_grouping_by_key_transform(self):
        base_qs = NullableJSONModel.objects.filter(value__d__0__isnull=False)
        for qs in (
            base_qs.order_by('value__d__0'),
            base_qs.annotate(key=KeyTransform('0', KeyTransform('d', 'value'))).order_by('key'),
        ):
            self.assertSequenceEqual(qs, [self.objs[4]])
        qs = NullableJSONModel.objects.filter(value__isnull=False)
        self.assertQuerysetEqual(
            qs.filter(value__isnull=False).annotate(
                key=KeyTextTransform('f', KeyTransform('1', KeyTransform('d', 'value'))),
            ).values('key').annotate(count=Count('key')).order_by('count'),
            [(None, 0), ('g', 1)],
            operator.itemgetter('key', 'count'),
        )
    def test_ordering_grouping_by_count(self):
        qs = NullableJSONModel.objects.filter(
            value__isnull=False,
        ).values('value__d__0').annotate(count=Count('value__d__0')).order_by('count')
        self.assertQuerysetEqual(qs, [0, 1], operator.itemgetter('count'))
    def test_order_grouping_custom_decoder(self):
        NullableJSONModel.objects.create(value_custom={'a': 'b'})
        qs = NullableJSONModel.objects.filter(value_custom__isnull=False)
        self.assertSequenceEqual(
            qs.values(
                'value_custom__a',
            ).annotate(
                count=Count('id'),
            ).order_by('value_custom__a'),
            [{'value_custom__a': 'b', 'count': 1}],
        )
    def test_key_transform_raw_expression(self):
        expr = RawSQL(self.raw_sql, ['{"x": "bar"}'])
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value__foo=KeyTransform('x', expr)),
            [self.objs[7]],
        )
    def test_nested_key_transform_raw_expression(self):
        expr = RawSQL(self.raw_sql, ['{"x": {"y": "bar"}}'])
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value__foo=KeyTransform('y', KeyTransform('x', expr))),
            [self.objs[7]],
        )
    def test_key_transform_expression(self):
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value__d__0__isnull=False).annotate(
                key=KeyTransform('d', 'value'),
                chain=KeyTransform('0', 'key'),
                expr=KeyTransform('0', Cast('key', models.JSONField())),
            ).filter(chain=F('expr')),
            [self.objs[4]],
        )
    def test_key_transform_annotation_expression(self):
        obj = NullableJSONModel.objects.create(value={'d': ['e', 'e']})
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value__d__0__isnull=False).annotate(
                key=F('value__d'),
                chain=F('key__0'),
                expr=Cast('key', models.JSONField()),
            ).filter(chain=F('expr__1')),
            [obj],
        )
    def test_nested_key_transform_expression(self):
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value__d__0__isnull=False).annotate(
                key=KeyTransform('d', 'value'),
                chain=KeyTransform('f', KeyTransform('1', 'key')),
                expr=KeyTransform('f', KeyTransform('1', Cast('key', models.JSONField()))),
            ).filter(chain=F('expr')),
            [self.objs[4]],
        )
    def test_nested_key_transform_annotation_expression(self):
        obj = NullableJSONModel.objects.create(
            value={'d': ['e', {'f': 'g'}, {'f': 'g'}]},
        )
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value__d__0__isnull=False).annotate(
                key=F('value__d'),
                chain=F('key__1__f'),
                expr=Cast('key', models.JSONField()),
            ).filter(chain=F('expr__2__f')),
            [obj],
        )
    def test_nested_key_transform_on_subquery(self):
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value__d__0__isnull=False).annotate(
                subquery_value=Subquery(
                    NullableJSONModel.objects.filter(pk=OuterRef('pk')).values('value')
                ),
                key=KeyTransform('d', 'subquery_value'),
                chain=KeyTransform('f', KeyTransform('1', 'key')),
            ).filter(chain='g'),
            [self.objs[4]],
        )
    def test_expression_wrapper_key_transform(self):
        self.assertSequenceEqual(
            NullableJSONModel.objects.annotate(
                expr=ExpressionWrapper(
                    KeyTransform('c', 'value'),
                    output_field=IntegerField(),
                ),
            ).filter(expr__isnull=False),
            self.objs[3:5],
        )
    def test_has_key(self):
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value__has_key='a'),
            [self.objs[3], self.objs[4]],
        )
    def test_has_key_null_value(self):
        # A key mapped to JSON null still counts as present.
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value__has_key='j'),
            [self.objs[4]],
        )
    def test_has_key_deep(self):
        tests = [
            (Q(value__baz__has_key='a'), self.objs[7]),
            (Q(value__has_key=KeyTransform('a', KeyTransform('baz', 'value'))), self.objs[7]),
            (Q(value__has_key=F('value__baz__a')), self.objs[7]),
            (Q(value__has_key=KeyTransform('c', KeyTransform('baz', 'value'))), self.objs[7]),
            (Q(value__has_key=F('value__baz__c')), self.objs[7]),
            (Q(value__d__1__has_key='f'), self.objs[4]),
            (
                Q(value__has_key=KeyTransform('f', KeyTransform('1', KeyTransform('d', 'value')))),
                self.objs[4],
            ),
            (Q(value__has_key=F('value__d__1__f')), self.objs[4]),
        ]
        for condition, expected in tests:
            with self.subTest(condition=condition):
                self.assertSequenceEqual(
                    NullableJSONModel.objects.filter(condition),
                    [expected],
                )
    def test_has_key_list(self):
        obj = NullableJSONModel.objects.create(value=[{'a': 1}, {'b': 'x'}])
        tests = [
            Q(value__1__has_key='b'),
            Q(value__has_key=KeyTransform('b', KeyTransform(1, 'value'))),
            Q(value__has_key=KeyTransform('b', KeyTransform('1', 'value'))),
            Q(value__has_key=F('value__1__b')),
        ]
        for condition in tests:
            with self.subTest(condition=condition):
                self.assertSequenceEqual(
                    NullableJSONModel.objects.filter(condition),
                    [obj],
                )
    def test_has_keys(self):
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value__has_keys=['a', 'c', 'h']),
            [self.objs[4]],
        )
    def test_has_any_keys(self):
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value__has_any_keys=['c', 'l']),
            [self.objs[3], self.objs[4], self.objs[6]],
        )
    @skipUnlessDBFeature('supports_json_field_contains')
    def test_contains(self):
        tests = [
            ({}, self.objs[2:5] + self.objs[6:8]),
            ({'baz': {'a': 'b', 'c': 'd'}}, [self.objs[7]]),
            ({'baz': {'a': 'b'}}, [self.objs[7]]),
            ({'baz': {'c': 'd'}}, [self.objs[7]]),
            ({'k': True, 'l': False}, [self.objs[6]]),
            ({'d': ['e', {'f': 'g'}]}, [self.objs[4]]),
            ({'d': ['e']}, [self.objs[4]]),
            ({'d': [{'f': 'g'}]}, [self.objs[4]]),
            ([1, [2]], [self.objs[5]]),
            ([1], [self.objs[5]]),
            ([[2]], [self.objs[5]]),
            ({'n': [None]}, [self.objs[4]]),
            ({'j': None}, [self.objs[4]]),
        ]
        for value, expected in tests:
            with self.subTest(value=value):
                qs = NullableJSONModel.objects.filter(value__contains=value)
                self.assertSequenceEqual(qs, expected)
    @skipIfDBFeature('supports_json_field_contains')
    def test_contains_unsupported(self):
        msg = 'contains lookup is not supported on this database backend.'
        with self.assertRaisesMessage(NotSupportedError, msg):
            NullableJSONModel.objects.filter(
                value__contains={'baz': {'a': 'b', 'c': 'd'}},
            ).get()
    @skipUnlessDBFeature(
        'supports_primitives_in_json_field',
        'supports_json_field_contains',
    )
    def test_contains_primitives(self):
        for value in self.primitives:
            with self.subTest(value=value):
                qs = NullableJSONModel.objects.filter(value__contains=value)
                self.assertIs(qs.exists(), True)
    @skipUnlessDBFeature('supports_json_field_contains')
    def test_contained_by(self):
        qs = NullableJSONModel.objects.filter(value__contained_by={'a': 'b', 'c': 14, 'h': True})
        self.assertSequenceEqual(qs, self.objs[2:4])
    @skipIfDBFeature('supports_json_field_contains')
    def test_contained_by_unsupported(self):
        msg = 'contained_by lookup is not supported on this database backend.'
        with self.assertRaisesMessage(NotSupportedError, msg):
            NullableJSONModel.objects.filter(value__contained_by={'a': 'b'}).get()
    def test_deep_values(self):
        qs = NullableJSONModel.objects.values_list('value__k__l')
        expected_objs = [(None,)] * len(self.objs)
        expected_objs[4] = ('m',)
        self.assertSequenceEqual(qs, expected_objs)
    @skipUnlessDBFeature('can_distinct_on_fields')
    def test_deep_distinct(self):
        query = NullableJSONModel.objects.distinct('value__k__l').values_list('value__k__l')
        self.assertSequenceEqual(query, [('m',), (None,)])
    def test_isnull_key(self):
        # key__isnull=False works the same as has_key='key'.
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value__a__isnull=True),
            self.objs[:3] + self.objs[5:],
        )
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value__a__isnull=False),
            [self.objs[3], self.objs[4]],
        )
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value__j__isnull=False),
            [self.objs[4]],
        )
    def test_isnull_key_or_none(self):
        obj = NullableJSONModel.objects.create(value={'a': None})
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(Q(value__a__isnull=True) | Q(value__a=None)),
            self.objs[:3] + self.objs[5:] + [obj],
        )
    def test_none_key(self):
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value__j=None),
            [self.objs[4]],
        )
    def test_none_key_exclude(self):
        obj = NullableJSONModel.objects.create(value={'j': 1})
        if connection.vendor == 'oracle':
            # Oracle treats JSON null and missing keys alike in exclude().
            self.assertSequenceEqual(
                NullableJSONModel.objects.exclude(value__j=None),
                self.objs[1:4] + self.objs[5:] + [obj],
            )
        else:
            self.assertSequenceEqual(NullableJSONModel.objects.exclude(value__j=None), [obj])
    def test_shallow_list_lookup(self):
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value__0=1),
            [self.objs[5]],
        )
    def test_shallow_obj_lookup(self):
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value__a='b'),
            [self.objs[3], self.objs[4]],
        )
    def test_obj_subquery_lookup(self):
        qs = NullableJSONModel.objects.annotate(
            field=Subquery(NullableJSONModel.objects.filter(pk=OuterRef('pk')).values('value')),
        ).filter(field__a='b')
        self.assertSequenceEqual(qs, [self.objs[3], self.objs[4]])
    def test_deep_lookup_objs(self):
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value__k__l='m'),
            [self.objs[4]],
        )
    def test_shallow_lookup_obj_target(self):
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value__k={'l': 'm'}),
            [self.objs[4]],
        )
    def test_deep_lookup_array(self):
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value__1__0=2),
            [self.objs[5]],
        )
    def test_deep_lookup_mixed(self):
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value__d__1__f='g'),
            [self.objs[4]],
        )
    def test_deep_lookup_transform(self):
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value__c__gt=2),
            [self.objs[3], self.objs[4]],
        )
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value__c__gt=2.33),
            [self.objs[3], self.objs[4]],
        )
        self.assertIs(NullableJSONModel.objects.filter(value__c__lt=5).exists(), False)
    def test_lookup_exclude(self):
        tests = [
            (Q(value__a='b'), [self.objs[0]]),
            (Q(value__foo='bax'), [self.objs[0], self.objs[7]]),
        ]
        for condition, expected in tests:
            self.assertSequenceEqual(
                NullableJSONModel.objects.exclude(condition),
                expected,
            )
            self.assertSequenceEqual(
                NullableJSONModel.objects.filter(~condition),
                expected,
            )
    def test_lookup_exclude_nonexistent_key(self):
        # Values without the key are ignored.
        condition = Q(value__foo='bax')
        objs_with_value = [self.objs[6]]
        objs_with_different_value = [self.objs[0], self.objs[7]]
        self.assertSequenceEqual(
            NullableJSONModel.objects.exclude(condition),
            objs_with_different_value,
        )
        self.assertSequenceEqual(
            NullableJSONModel.objects.exclude(~condition),
            objs_with_value,
        )
        self.assertCountEqual(
            NullableJSONModel.objects.filter(condition | ~condition),
            objs_with_value + objs_with_different_value,
        )
        self.assertCountEqual(
            NullableJSONModel.objects.exclude(condition & ~condition),
            objs_with_value + objs_with_different_value,
        )
        # Add the __isnull lookup to get an exhaustive set.
        self.assertSequenceEqual(
            NullableJSONModel.objects.exclude(condition & Q(value__foo__isnull=False)),
            self.objs[0:6] + self.objs[7:],
        )
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(condition & Q(value__foo__isnull=False)),
            objs_with_value,
        )
    def test_usage_in_subquery(self):
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(
                id__in=NullableJSONModel.objects.filter(value__c=14),
            ),
            self.objs[3:5],
        )
    @skipUnlessDBFeature('supports_json_field_contains')
    def test_array_key_contains(self):
        tests = [
            ([], [self.objs[7]]),
            ('bar', [self.objs[7]]),
            (['bar'], [self.objs[7]]),
            ('ar', []),
        ]
        for value, expected in tests:
            with self.subTest(value=value):
                self.assertSequenceEqual(
                    NullableJSONModel.objects.filter(value__bar__contains=value),
                    expected,
                )
    def test_key_iexact(self):
        self.assertIs(NullableJSONModel.objects.filter(value__foo__iexact='BaR').exists(), True)
        self.assertIs(NullableJSONModel.objects.filter(value__foo__iexact='"BaR"').exists(), False)
    def test_key_in(self):
        tests = [
            ('value__c__in', [14], self.objs[3:5]),
            ('value__c__in', [14, 15], self.objs[3:5]),
            ('value__0__in', [1], [self.objs[5]]),
            ('value__0__in', [1, 3], [self.objs[5]]),
            ('value__foo__in', ['bar'], [self.objs[7]]),
            (
                'value__foo__in',
                [KeyTransform('foo', KeyTransform('bax', 'value'))],
                [self.objs[7]],
            ),
            ('value__foo__in', [F('value__bax__foo')], [self.objs[7]]),
            (
                'value__foo__in',
                [KeyTransform('foo', KeyTransform('bax', 'value')), 'baz'],
                [self.objs[7]],
            ),
            ('value__foo__in', [F('value__bax__foo'), 'baz'], [self.objs[7]]),
            ('value__foo__in', ['bar', 'baz'], [self.objs[7]]),
            ('value__bar__in', [['foo', 'bar']], [self.objs[7]]),
            ('value__bar__in', [['foo', 'bar'], ['a']], [self.objs[7]]),
            ('value__bax__in', [{'foo': 'bar'}, {'a': 'b'}], [self.objs[7]]),
        ]
        for lookup, value, expected in tests:
            with self.subTest(lookup=lookup, value=value):
                self.assertSequenceEqual(
                    NullableJSONModel.objects.filter(**{lookup: value}),
                    expected,
                )
    def test_key_values(self):
        qs = NullableJSONModel.objects.filter(value__h=True)
        tests = [
            ('value__a', 'b'),
            ('value__c', 14),
            ('value__d', ['e', {'f': 'g'}]),
            ('value__h', True),
            ('value__i', False),
            ('value__j', None),
            ('value__k', {'l': 'm'}),
            ('value__n', [None]),
            ('value__p', 4.2),
        ]
        for lookup, expected in tests:
            with self.subTest(lookup=lookup):
                self.assertEqual(qs.values_list(lookup, flat=True).get(), expected)
    @skipUnlessDBFeature('supports_json_field_contains')
    def test_key_contains(self):
        self.assertIs(NullableJSONModel.objects.filter(value__foo__contains='ar').exists(), False)
        self.assertIs(NullableJSONModel.objects.filter(value__foo__contains='bar').exists(), True)
    def test_key_icontains(self):
        self.assertIs(NullableJSONModel.objects.filter(value__foo__icontains='Ar').exists(), True)
    def test_key_startswith(self):
        self.assertIs(NullableJSONModel.objects.filter(value__foo__startswith='b').exists(), True)
    def test_key_istartswith(self):
        self.assertIs(NullableJSONModel.objects.filter(value__foo__istartswith='B').exists(), True)
    def test_key_endswith(self):
        self.assertIs(NullableJSONModel.objects.filter(value__foo__endswith='r').exists(), True)
    def test_key_iendswith(self):
        self.assertIs(NullableJSONModel.objects.filter(value__foo__iendswith='R').exists(), True)
    def test_key_regex(self):
        self.assertIs(NullableJSONModel.objects.filter(value__foo__regex=r'^bar$').exists(), True)
    def test_key_iregex(self):
        self.assertIs(NullableJSONModel.objects.filter(value__foo__iregex=r'^bAr$').exists(), True)
    def test_key_quoted_string(self):
        self.assertEqual(
            NullableJSONModel.objects.filter(value__o='"quoted"').get(),
            self.objs[4],
        )
    @skipUnlessDBFeature('has_json_operators')
    def test_key_sql_injection(self):
        # The crafted key must end up quoted inside the SQL, not break out of it.
        with CaptureQueriesContext(connection) as queries:
            self.assertIs(
                NullableJSONModel.objects.filter(**{
                    """value__test' = '"a"') OR 1 = 1 OR ('d""": 'x',
                }).exists(),
                False,
            )
        self.assertIn(
            """."value" -> 'test'' = ''"a"'') OR 1 = 1 OR (''d') = '"x"' """,
            queries[0]['sql'],
        )
    @skipIfDBFeature('has_json_operators')
    def test_key_sql_injection_escape(self):
        query = str(JSONModel.objects.filter(**{
            """value__test") = '"a"' OR 1 = 1 OR ("d""": 'x',
        }).query)
        self.assertIn('"test\\"', query)
        self.assertIn('\\"d', query)
    def test_key_escape(self):
        obj = NullableJSONModel.objects.create(value={'%total': 10})
        self.assertEqual(NullableJSONModel.objects.filter(**{'value__%total': 10}).get(), obj)
    def test_none_key_and_exact_lookup(self):
        self.assertSequenceEqual(
            NullableJSONModel.objects.filter(value__a='b', value__j=None),
            [self.objs[4]],
        )
    def test_lookups_with_key_transform(self):
        tests = (
            ('value__baz__has_key', 'c'),
            ('value__baz__has_keys', ['a', 'c']),
            ('value__baz__has_any_keys', ['a', 'x']),
            ('value__has_key', KeyTextTransform('foo', 'value')),
        )
        for lookup, value in tests:
            with self.subTest(lookup=lookup):
                self.assertIs(NullableJSONModel.objects.filter(
                    **{lookup: value},
                ).exists(), True)
    @skipUnlessDBFeature('supports_json_field_contains')
    def test_contains_contained_by_with_key_transform(self):
        tests = [
            ('value__d__contains', 'e'),
            ('value__d__contains', [{'f': 'g'}]),
            ('value__contains', KeyTransform('bax', 'value')),
            ('value__contains', F('value__bax')),
            ('value__baz__contains', {'a': 'b'}),
            ('value__baz__contained_by', {'a': 'b', 'c': 'd', 'e': 'f'}),
            (
                'value__contained_by',
                KeyTransform('x', RawSQL(
                    self.raw_sql,
                    ['{"x": {"a": "b", "c": 1, "d": "e"}}'],
                )),
            ),
        ]
        # For databases where {'f': 'g'} (without surrounding []) matches
        # [{'f': 'g'}].
        if not connection.features.json_key_contains_list_matching_requires_list:
            tests.append(('value__d__contains', {'f': 'g'}))
        for lookup, value in tests:
            with self.subTest(lookup=lookup, value=value):
                self.assertIs(NullableJSONModel.objects.filter(
                    **{lookup: value},
                ).exists(), True)
    def test_join_key_transform_annotation_expression(self):
        related_obj = RelatedJSONModel.objects.create(
            value={'d': ['f', 'e']},
            json_model=self.objs[4],
        )
        RelatedJSONModel.objects.create(
            value={'d': ['e', 'f']},
            json_model=self.objs[4],
        )
        self.assertSequenceEqual(
            RelatedJSONModel.objects.annotate(
                key=F('value__d'),
                related_key=F('json_model__value__d'),
                chain=F('key__1'),
                expr=Cast('key', models.JSONField()),
            ).filter(chain=F('related_key__0')),
            [related_obj],
        )
| true | true |
f71efd99812e463f3811b77ce6cfe3ce1b3d6a49 | 26,107 | py | Python | run_manager.py | Johnzhjw/CIT2FR-FL-NAS | 53e93075ff1834ab817ad6359025ddafd20e6ef4 | [
"Apache-2.0"
] | null | null | null | run_manager.py | Johnzhjw/CIT2FR-FL-NAS | 53e93075ff1834ab817ad6359025ddafd20e6ef4 | [
"Apache-2.0"
] | null | null | null | run_manager.py | Johnzhjw/CIT2FR-FL-NAS | 53e93075ff1834ab817ad6359025ddafd20e6ef4 | [
"Apache-2.0"
] | null | null | null | # Once for All: Train One Network and Specialize it for Efficient Deployment
# Han Cai, Chuang Gan, Tianzhe Wang, Zhekai Zhang, Song Han
# International Conference on Learning Representations (ICLR), 2020.
import os
import time
import json
import math
from tqdm import tqdm
import numpy as np
import copy
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torchvision
# from imagenet_codebase.utils import *
from ofa.imagenet_codebase.utils import count_parameters, count_net_flops, measure_net_latency, \
cross_entropy_loss_with_soft_target, cross_entropy_with_label_smoothing
from ofa.utils import AverageMeter, accuracy
class RunConfig:
    """Hyper-parameter container for a single training run.

    Bundles schedule, dataset, optimizer and bookkeeping settings, and knows
    how to compute/apply learning rates and build the optimizer. Data loaders
    are supplied by subclasses via the abstract ``data_provider`` property.
    """

    def __init__(self, n_epochs, init_lr, lr_schedule_type, lr_schedule_param,
                 dataset, train_batch_size, test_batch_size, valid_size,
                 opt_type, opt_param, weight_decay, label_smoothing, no_decay_keys,
                 mixup_alpha,
                 model_init, validation_frequency, print_frequency):
        self.n_epochs = n_epochs
        self.init_lr = init_lr
        self.lr_schedule_type = lr_schedule_type
        self.lr_schedule_param = lr_schedule_param
        self.dataset = dataset
        self.train_batch_size = train_batch_size
        self.test_batch_size = test_batch_size
        self.valid_size = valid_size
        self.opt_type = opt_type
        self.opt_param = opt_param
        self.weight_decay = weight_decay
        self.label_smoothing = label_smoothing
        self.no_decay_keys = no_decay_keys
        self.mixup_alpha = mixup_alpha
        self.model_init = model_init
        self.validation_frequency = validation_frequency
        self.print_frequency = print_frequency

    @property
    def config(self):
        """Public hyper-parameters as a plain dict (underscore attrs excluded)."""
        return {key: val for key, val in self.__dict__.items() if not key.startswith('_')}

    def copy(self):
        """Return a fresh RunConfig with identical hyper-parameters."""
        return RunConfig(**self.config)

    """ learning rate """
    def calc_learning_rate(self, epoch, batch=0, nBatch=None):
        """Learning rate at a given (epoch, batch) position.

        'cosine' anneals from init_lr to 0 over all training steps; None keeps
        the LR constant; anything else is rejected.
        """
        if self.lr_schedule_type == 'cosine':
            total_steps = self.n_epochs * nBatch
            cur_step = epoch * nBatch + batch
            return 0.5 * self.init_lr * (1 + math.cos(math.pi * cur_step / total_steps))
        if self.lr_schedule_type is None:
            return self.init_lr
        raise ValueError('do not support: %s' % self.lr_schedule_type)

    def adjust_learning_rate(self, optimizer, epoch, batch=0, nBatch=None):
        """ adjust learning of a given optimizer and return the new learning rate """
        lr = self.calc_learning_rate(epoch, batch, nBatch)
        for group in optimizer.param_groups:
            group['lr'] = lr
        return lr

    def warmup_adjust_learning_rate(self, optimizer, T_total, nBatch, epoch, batch=0, warmup_lr=0):
        """Linearly ramp the LR from warmup_lr to init_lr over T_total steps."""
        cur_step = epoch * nBatch + batch + 1
        lr = cur_step / T_total * (self.init_lr - warmup_lr) + warmup_lr
        for group in optimizer.param_groups:
            group['lr'] = lr
        return lr

    """ data provider """
    @property
    def data_provider(self):
        # Subclasses must supply the concrete dataset/loader factory.
        raise NotImplementedError

    def train_FL_loader(self, _):
        """Training loader for federated client `_`."""
        return self.data_provider.train_splits[_]

    @property
    def train_loader(self):
        return self.data_provider.train

    @property
    def valid_loader(self):
        return self.data_provider.valid

    @property
    def test_loader(self):
        return self.data_provider.test

    def random_sub_train_loader(self, n_images, batch_size, num_worker=None, num_replicas=None, rank=None, tag_FL=-1):
        """A loader over a random subset of the training data (e.g. for BN reset)."""
        return self.data_provider.build_sub_train_loader(n_images, batch_size, num_worker, num_replicas, rank, tag_FL)

    """ optimizer """
    def build_optimizer(self, net_params):
        """Build the SGD/Adam optimizer described by opt_type/opt_param.

        When no_decay_keys is set, `net_params` must be a two-element list:
        [decayed params, undecayed params].
        """
        if self.no_decay_keys is not None:
            assert isinstance(net_params, list) and len(net_params) == 2
            decayed, undecayed = net_params
            net_params = [
                {'params': decayed, 'weight_decay': self.weight_decay},
                {'params': undecayed, 'weight_decay': 0},
            ]
        else:
            net_params = [{'params': net_params, 'weight_decay': self.weight_decay}]
        if self.opt_type == 'sgd':
            sgd_args = {} if self.opt_param is None else self.opt_param
            return torch.optim.SGD(
                net_params, self.init_lr,
                momentum=sgd_args.get('momentum', 0.9),
                nesterov=sgd_args.get('nesterov', True),
            )
        if self.opt_type == 'adam':
            return torch.optim.Adam(net_params, self.init_lr)
        raise NotImplementedError
def get_net_info(net, input_shape=(3, 224, 224), measure_latency=None, print_info=True):
    """Collect parameter count, FLOPs and (optionally) latency stats for `net`.

    `measure_latency` is a '#'-separated list of latency types to profile
    (or None to skip). Returns a dict with 'params', 'flops' and one
    '<type> latency' entry per requested type.
    """
    if isinstance(net, nn.DataParallel):
        net = net.module
    latency_types = [] if measure_latency is None else measure_latency.split('#')
    net_info = {
        'params': count_parameters(net),
        # FLOPs are profiled at batch size 2 and halved -> per-image count.
        'flops': count_net_flops(net, [2] + list(input_shape)) / 2,
    }
    for l_type in latency_types:
        latency, measured_latency = measure_net_latency(net, l_type, fast=False, input_shape=input_shape)
        net_info['%s latency' % l_type] = {'val': latency, 'hist': measured_latency}
    if print_info:
        print(net)
        print('Total training params: %.2fM' % (net_info['params'] / 1e6))
        print('Total FLOPs: %.2fM' % (net_info['flops'] / 1e6))
        for l_type in latency_types:
            print('Estimated %s latency: %.3fms' % (l_type, net_info['%s latency' % l_type]['val']))
    return net_info
class RunManager:
    def __init__(self, path, net, run_config: RunConfig, init=True, measure_latency=None, no_gpu=False, mix_prec=None):
        """Set up a training run: device placement, weight init, net-info dump,
        loss criteria, optimizer, optional apex mixed precision, and finally a
        DataParallel wrapper around the net.

        Args:
            path: output directory (created if missing) for checkpoints/logs.
            net: the network to train.
            run_config: hyper-parameter bundle (RunConfig).
            init: when True, (re)initialize weights via run_config.model_init.
            measure_latency: '#'-separated latency types for get_net_info, or None.
            no_gpu: force CPU even if CUDA is available.
            mix_prec: apex AMP opt_level (e.g. 'O1'), or None to disable.
        """
        self.path = path
        self.net = net
        self.run_config = run_config
        self.mix_prec = mix_prec
        self.best_acc = 0
        self.start_epoch = 0
        os.makedirs(self.path, exist_ok=True)
        # move network to GPU if available
        if torch.cuda.is_available() and (not no_gpu):
            self.device = torch.device('cuda:0')
            self.net = self.net.to(self.device)
            cudnn.benchmark = True
        else:
            self.device = torch.device('cpu')
        # initialize model (default)
        if init:
            self.network.init_model(run_config.model_init)
        # net info
        net_info = get_net_info(self.net, self.run_config.data_provider.data_shape, measure_latency, True)
        with open('%s/net_info.txt' % self.path, 'w') as fout:
            fout.write(json.dumps(net_info, indent=4) + '\n')
            # module_str may not exist on plain (non-OFA) networks; best effort.
            try:
                fout.write(self.network.module_str)
            except Exception:
                pass
        # criterion
        # Priority: mixup (soft targets) > label smoothing > plain CE.
        if isinstance(self.run_config.mixup_alpha, float):
            self.train_criterion = cross_entropy_loss_with_soft_target
        elif self.run_config.label_smoothing > 0:
            self.train_criterion = lambda pred, target: \
                cross_entropy_with_label_smoothing(pred, target, self.run_config.label_smoothing)
        else:
            self.train_criterion = nn.CrossEntropyLoss()
        self.test_criterion = nn.CrossEntropyLoss()
        # optimizer
        if self.run_config.no_decay_keys:
            keys = self.run_config.no_decay_keys.split('#')
            net_params = [
                self.network.get_parameters(keys, mode='exclude'),  # parameters with weight decay
                self.network.get_parameters(keys, mode='include'),  # parameters without weight decay
            ]
        else:
            try:
                net_params = self.network.weight_parameters()
            except Exception:
                net_params = self.network.parameters()
        self.optimizer = self.run_config.build_optimizer(net_params)
        # apex AMP must wrap net+optimizer before the DataParallel wrapper.
        if mix_prec is not None:
            from apex import amp
            self.network, self.optimizer = amp.initialize(self.network, self.optimizer, opt_level=mix_prec)
        self.net = torch.nn.DataParallel(self.net)
def init_FL(self, flag_reset_running_statistics):
""" FL """
if self.run_config.flag_FL:
self.nets_FL = []
self.optimizers_FL = []
for _ in range(self.run_config.size_FL):
self.nets_FL.append(copy.deepcopy(self.net))
if flag_reset_running_statistics:
self.reset_running_statistics(self.network_FL(_), _)
for _ in range(self.run_config.size_FL):
if self.run_config.no_decay_keys:
keys = self.run_config.no_decay_keys.split('#')
net_params = [
self.network_FL(_).get_parameters(keys, mode='exclude'), # parameters with weight decay
self.network_FL(_).get_parameters(keys, mode='include'), # parameters without weight decay
]
else:
try:
net_params = self.network_FL(_).weight_parameters()
except Exception:
net_params = self.network_FL(_).parameters()
self.optimizers_FL.append(self.run_config.build_optimizer(net_params))
""" save path and log path """
@property
def save_path(self):
if self.__dict__.get('_save_path', None) is None:
save_path = os.path.join(self.path, 'checkpoint')
os.makedirs(save_path, exist_ok=True)
self.__dict__['_save_path'] = save_path
return self.__dict__['_save_path']
@property
def logs_path(self):
if self.__dict__.get('_logs_path', None) is None:
logs_path = os.path.join(self.path, 'logs')
os.makedirs(logs_path, exist_ok=True)
self.__dict__['_logs_path'] = logs_path
return self.__dict__['_logs_path']
def network_FL(self, _):
if isinstance(self.nets_FL[_], nn.DataParallel):
return self.nets_FL[_].module
else:
return self.nets_FL[_]
@property
def network(self):
if isinstance(self.net, nn.DataParallel):
return self.net.module
else:
return self.net
@network.setter
def network(self, new_val):
if isinstance(self.net, nn.DataParallel):
self.net.module = new_val
else:
self.net = new_val
def write_log(self, log_str, prefix='valid', should_print=True):
""" prefix: valid, train, test """
if prefix in ['valid', 'test']:
with open(os.path.join(self.logs_path, 'valid_console.txt'), 'a') as fout:
fout.write(log_str + '\n')
fout.flush()
if prefix in ['valid', 'test', 'train']:
with open(os.path.join(self.logs_path, 'train_console.txt'), 'a') as fout:
if prefix in ['valid', 'test']:
fout.write('=' * 10)
fout.write(log_str + '\n')
fout.flush()
else:
with open(os.path.join(self.logs_path, '%s.txt' % prefix), 'a') as fout:
fout.write(log_str + '\n')
fout.flush()
if should_print:
print(log_str)
""" save and load models """
def save_model(self, checkpoint=None, is_best=False, model_name=None):
if checkpoint is None:
checkpoint = {'state_dict': self.network.state_dict()}
if model_name is None:
model_name = 'checkpoint.pth.tar'
if self.mix_prec is not None:
from apex import amp
checkpoint['amp'] = amp.state_dict()
checkpoint['dataset'] = self.run_config.dataset # add `dataset` info to the checkpoint
latest_fname = os.path.join(self.save_path, 'latest.txt')
model_path = os.path.join(self.save_path, model_name)
with open(latest_fname, 'w') as fout:
fout.write(model_path + '\n')
torch.save(checkpoint, model_path)
if is_best:
best_path = os.path.join(self.save_path, 'model_best.pth.tar')
torch.save({'state_dict': checkpoint['state_dict']}, best_path)
def load_model(self, model_fname=None):
latest_fname = os.path.join(self.save_path, 'latest.txt')
if model_fname is None and os.path.exists(latest_fname):
with open(latest_fname, 'r') as fin:
model_fname = fin.readline()
if model_fname[-1] == '\n':
model_fname = model_fname[:-1]
try:
if model_fname is None or not os.path.exists(model_fname):
model_fname = '%s/checkpoint.pth.tar' % self.save_path
with open(latest_fname, 'w') as fout:
fout.write(model_fname + '\n')
print("=> loading checkpoint '{}'".format(model_fname))
if torch.cuda.is_available():
checkpoint = torch.load(model_fname)
else:
checkpoint = torch.load(model_fname, map_location='cpu')
self.network.load_state_dict(checkpoint['state_dict'])
if 'epoch' in checkpoint:
self.start_epoch = checkpoint['epoch'] + 1
if 'best_acc' in checkpoint:
self.best_acc = checkpoint['best_acc']
if 'optimizer' in checkpoint:
self.optimizer.load_state_dict(checkpoint['optimizer'])
if self.mix_prec is not None and 'amp' in checkpoint:
from apex import amp
amp.load_state_dict(checkpoint['amp'])
print("=> loaded checkpoint '{}'".format(model_fname))
except Exception:
print('fail to load checkpoint from %s' % self.save_path)
def save_config(self):
""" dump run_config and net_config to the model_folder """
net_save_path = os.path.join(self.path, 'net.config')
json.dump(self.network.config, open(net_save_path, 'w'), indent=4)
print('Network configs dump to %s' % net_save_path)
run_save_path = os.path.join(self.path, 'run.config')
json.dump(self.run_config.config, open(run_save_path, 'w'), indent=4)
print('Run configs dump to %s' % run_save_path)
""" train and test """
def validate(self, epoch=0, is_test=True, run_str='', net=None, data_loader=None, no_logs=False):
if net is None:
net = self.net
if not isinstance(net, nn.DataParallel):
net = nn.DataParallel(net)
if data_loader is None:
if is_test:
data_loader = self.run_config.test_loader
else:
data_loader = self.run_config.valid_loader
net.eval()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
with torch.no_grad():
with tqdm(total=len(data_loader),
desc='Validate Epoch #{} {}'.format(epoch + 1, run_str), disable=no_logs) as t:
for i, (images, labels) in enumerate(data_loader):
images, labels = images.to(self.device), labels.to(self.device)
# compute output
output = net(images)
loss = self.test_criterion(output, labels.long())
# measure accuracy and record loss
acc1 = accuracy(output, labels, topk=(1,))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0].item(), images.size(0))
t.set_postfix({
'loss': losses.avg,
'top1': top1.avg,
'img_size': images.size(2),
})
t.update(1)
return losses.avg, top1.avg
def validate_all_resolution(self, epoch=0, is_test=True, net=None):
if net is None:
net = self.network
if isinstance(self.run_config.data_provider.image_size, list):
img_size_list, loss_list, top1_list, top5_list = [], [], [], []
for img_size in self.run_config.data_provider.image_size:
img_size_list.append(img_size)
self.run_config.data_provider.assign_active_img_size(img_size)
if not self.run_config.flag_FL:
self.reset_running_statistics(net=net)
else:
self.reset_running_statistics(net=None, tag_FL=self.run_config.size_FL)
loss, top1 = self.validate(epoch, is_test, net=net)
loss_list.append(loss)
top1_list.append(top1)
return img_size_list, loss_list, top1_list
else:
loss, top1 = self.validate(epoch, is_test, net=net)
return [self.run_config.data_provider.active_img_size], [loss], [top1]
def train_one_epoch(self, args, epoch, warmup_epochs=0, warmup_lr=0, tag_FL=-1):
# switch to train mode
if tag_FL >= 0:
self.nets_FL[tag_FL].train()
else:
self.net.train()
if tag_FL >= 0:
data_loader = self.run_config.train_FL_loader(tag_FL)
else:
data_loader = self.run_config.train_loader
nBatch = len(data_loader)
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
data_time = AverageMeter()
with tqdm(total=nBatch,
desc='Train Epoch #{}'.format(epoch + 1)) as t:
end = time.time()
for i, (images, labels) in enumerate(data_loader):
data_time.update(time.time() - end)
if tag_FL >= 0:
if epoch < warmup_epochs:
new_lr = self.run_config.warmup_adjust_learning_rate(
self.optimizers_FL[tag_FL], warmup_epochs * nBatch, nBatch, epoch, i, warmup_lr,
)
else:
new_lr = self.run_config.adjust_learning_rate(self.optimizers_FL[tag_FL], epoch - warmup_epochs, i, nBatch)
else:
if epoch < warmup_epochs:
new_lr = self.run_config.warmup_adjust_learning_rate(
self.optimizer, warmup_epochs * nBatch, nBatch, epoch, i, warmup_lr,
)
else:
new_lr = self.run_config.adjust_learning_rate(self.optimizer, epoch - warmup_epochs, i, nBatch)
images, labels = images.to(self.device), labels.to(self.device)
target = labels
# soft target
if args.teacher_model is not None:
args.teacher_model.train()
with torch.no_grad():
soft_logits = args.teacher_model(images).detach()
soft_label = F.softmax(soft_logits, dim=1)
# compute output
if isinstance(self.network, torchvision.models.Inception3):
if tag_FL >= 0:
output, aux_outputs = self.nets_FL[tag_FL](images)
else:
output, aux_outputs = self.net(images)
loss1 = self.train_criterion(output, labels.long())
loss2 = self.train_criterion(aux_outputs, labels.long())
loss = loss1 + 0.4 * loss2
else:
if tag_FL >= 0:
output = self.nets_FL[tag_FL](images)
else:
output = self.net(images)
loss = self.train_criterion(output, labels.long())
if args.teacher_model is None:
loss_type = 'ce'
else:
if args.kd_type == 'ce':
kd_loss = cross_entropy_loss_with_soft_target(output, soft_label)
else:
kd_loss = F.mse_loss(output, soft_logits)
loss = args.kd_ratio * kd_loss + loss
loss_type = '%.1fkd-%s & ce' % (args.kd_ratio, args.kd_type)
# compute gradient and do SGD step
if tag_FL >= 0:
self.nets_FL[tag_FL].zero_grad() # or self.optimizer.zero_grad()
else:
self.net.zero_grad() # or self.optimizer.zero_grad()
if self.mix_prec is not None:
from apex import amp
if tag_FL >= 0:
with amp.scale_loss(loss, self.optimizers_FL[tag_FL]) as scaled_loss:
scaled_loss.backward()
else:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
if tag_FL >= 0:
self.optimizers_FL[tag_FL].step()
else:
self.optimizer.step()
# measure accuracy and record loss
acc1 = accuracy(output, target, topk=(1,))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0].item(), images.size(0))
t.set_postfix({
'loss': losses.avg,
'top1': top1.avg,
'img_size': images.size(2),
'lr': new_lr,
'loss_type': loss_type,
'data_time': data_time.avg,
})
t.update(1)
end = time.time()
return losses.avg, top1.avg
def FedAvg(self):
if self.run_config.flag_FL:
with torch.no_grad():
base_state = self.network.state_dict()
all_states = []
for _ in range(self.run_config.size_FL):
model = self.network_FL(_)
all_states.append(model.state_dict())
for name in base_state:
for _ in range(self.run_config.size_FL):
# print(all_states[_][name].shape)
# print(all_states[_][name])
tmp_state = (all_states[_][name] / self.run_config.size_FL) if _ == 0 else \
tmp_state + (all_states[_][name] / self.run_config.size_FL)
base_state[name].copy_(tmp_state)
self.network.load_state_dict(base_state)
for _ in range(self.run_config.size_FL):
self.network_FL(_).load_state_dict(base_state)
def train(self, args, warmup_epoch=0, warmup_lr=0, flag_reset_running_statistics=False):
self.init_FL(flag_reset_running_statistics)
for epoch in range(self.start_epoch, self.run_config.n_epochs + warmup_epoch):
if not self.run_config.flag_FL:
train_loss, train_top1 = self.train_one_epoch(args, epoch, warmup_epoch, warmup_lr)
else:
train_loss, train_top1 = [], []
for _ in range(self.run_config.size_FL):
loss, top1 = self.train_one_epoch(args, epoch, warmup_epoch, warmup_lr, _)
train_loss.append(loss)
train_top1.append(top1)
train_loss = np.mean(train_loss)
train_top1 = np.mean(train_top1)
self.FedAvg()
if (epoch + 1) % self.run_config.validation_frequency == 0:
img_size, val_loss, val_acc = self.validate_all_resolution(epoch=epoch, is_test=False)
is_best = np.mean(val_acc) > self.best_acc
self.best_acc = max(self.best_acc, np.mean(val_acc))
val_log = 'Valid [{0}/{1}]\tloss {2:.3f}\ttop-1 acc {3:.3f} ({4:.3f})'. \
format(epoch + 1 - warmup_epoch, self.run_config.n_epochs,
np.mean(val_loss), np.mean(val_acc), self.best_acc)
val_log += '\tTrain top-1 {top1:.3f}\tloss {train_loss:.3f}\t'. \
format(top1=train_top1, train_loss=train_loss)
for i_s, v_a in zip(img_size, val_acc):
val_log += '(%d, %.3f), ' % (i_s, v_a)
self.write_log(val_log, prefix='valid', should_print=False)
else:
is_best = False
self.save_model({
'epoch': epoch,
'best_acc': self.best_acc,
'optimizer': self.optimizer.state_dict(),
'state_dict': self.network.state_dict(),
}, is_best=is_best)
return self.network
def reset_running_statistics(self, net=None, tag_FL=-1):
from ofa.elastic_nn.utils import set_running_statistics
if tag_FL == -1:
if net is None:
net = self.network
sub_train_loader = self.run_config.random_sub_train_loader(2000, 100)
set_running_statistics(net, sub_train_loader)
elif tag_FL == self.run_config.size_FL:
if not self.run_config.flag_FL:
print('Wrong FL client ID')
import sys
sys.exit()
for _ in range(tag_FL):
self.reset_running_statistics(self.network_FL(_), _)
self.FedAvg()
else:
if tag_FL < 0 or tag_FL >= self.run_config.size_FL or not self.run_config.flag_FL:
print('Wrong FL client ID')
import sys
sys.exit()
if net is None:
net = self.network_FL(tag_FL)
sub_train_loader = self.run_config.random_sub_train_loader(2000, 100, tag_FL=tag_FL)
set_running_statistics(net, sub_train_loader)
| 40.792188 | 131 | 0.569847 |
import os
import time
import json
import math
from tqdm import tqdm
import numpy as np
import copy
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torchvision
from ofa.imagenet_codebase.utils import count_parameters, count_net_flops, measure_net_latency, \
cross_entropy_loss_with_soft_target, cross_entropy_with_label_smoothing
from ofa.utils import AverageMeter, accuracy
class RunConfig:
def __init__(self, n_epochs, init_lr, lr_schedule_type, lr_schedule_param,
dataset, train_batch_size, test_batch_size, valid_size,
opt_type, opt_param, weight_decay, label_smoothing, no_decay_keys,
mixup_alpha,
model_init, validation_frequency, print_frequency):
self.n_epochs = n_epochs
self.init_lr = init_lr
self.lr_schedule_type = lr_schedule_type
self.lr_schedule_param = lr_schedule_param
self.dataset = dataset
self.train_batch_size = train_batch_size
self.test_batch_size = test_batch_size
self.valid_size = valid_size
self.opt_type = opt_type
self.opt_param = opt_param
self.weight_decay = weight_decay
self.label_smoothing = label_smoothing
self.no_decay_keys = no_decay_keys
self.mixup_alpha = mixup_alpha
self.model_init = model_init
self.validation_frequency = validation_frequency
self.print_frequency = print_frequency
@property
def config(self):
config = {}
for key in self.__dict__:
if not key.startswith('_'):
config[key] = self.__dict__[key]
return config
def copy(self):
return RunConfig(**self.config)
def calc_learning_rate(self, epoch, batch=0, nBatch=None):
if self.lr_schedule_type == 'cosine':
T_total = self.n_epochs * nBatch
T_cur = epoch * nBatch + batch
lr = 0.5 * self.init_lr * (1 + math.cos(math.pi * T_cur / T_total))
elif self.lr_schedule_type is None:
lr = self.init_lr
else:
raise ValueError('do not support: %s' % self.lr_schedule_type)
return lr
def adjust_learning_rate(self, optimizer, epoch, batch=0, nBatch=None):
new_lr = self.calc_learning_rate(epoch, batch, nBatch)
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
return new_lr
def warmup_adjust_learning_rate(self, optimizer, T_total, nBatch, epoch, batch=0, warmup_lr=0):
T_cur = epoch * nBatch + batch + 1
new_lr = T_cur / T_total * (self.init_lr - warmup_lr) + warmup_lr
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
return new_lr
@property
def data_provider(self):
raise NotImplementedError
def train_FL_loader(self, _):
return self.data_provider.train_splits[_]
@property
def train_loader(self):
return self.data_provider.train
@property
def valid_loader(self):
return self.data_provider.valid
@property
def test_loader(self):
return self.data_provider.test
def random_sub_train_loader(self, n_images, batch_size, num_worker=None, num_replicas=None, rank=None, tag_FL=-1):
return self.data_provider.build_sub_train_loader(n_images, batch_size, num_worker, num_replicas, rank, tag_FL)
def build_optimizer(self, net_params):
if self.no_decay_keys is not None:
assert isinstance(net_params, list) and len(net_params) == 2
net_params = [
{'params': net_params[0], 'weight_decay': self.weight_decay},
{'params': net_params[1], 'weight_decay': 0},
]
else:
net_params = [{'params': net_params, 'weight_decay': self.weight_decay}]
if self.opt_type == 'sgd':
opt_param = {} if self.opt_param is None else self.opt_param
momentum, nesterov = opt_param.get('momentum', 0.9), opt_param.get('nesterov', True)
optimizer = torch.optim.SGD(net_params, self.init_lr, momentum=momentum, nesterov=nesterov)
elif self.opt_type == 'adam':
optimizer = torch.optim.Adam(net_params, self.init_lr)
else:
raise NotImplementedError
return optimizer
def get_net_info(net, input_shape=(3, 224, 224), measure_latency=None, print_info=True):
net_info = {}
if isinstance(net, nn.DataParallel):
net = net.module
net_info['params'] = count_parameters(net)
net_info['flops'] = count_net_flops(net, [2] + list(input_shape))/2
latency_types = [] if measure_latency is None else measure_latency.split('#')
for l_type in latency_types:
latency, measured_latency = measure_net_latency(net, l_type, fast=False, input_shape=input_shape)
net_info['%s latency' % l_type] = {
'val': latency,
'hist': measured_latency
}
if print_info:
print(net)
print('Total training params: %.2fM' % (net_info['params'] / 1e6))
print('Total FLOPs: %.2fM' % (net_info['flops'] / 1e6))
for l_type in latency_types:
print('Estimated %s latency: %.3fms' % (l_type, net_info['%s latency' % l_type]['val']))
return net_info
class RunManager:
def __init__(self, path, net, run_config: RunConfig, init=True, measure_latency=None, no_gpu=False, mix_prec=None):
self.path = path
self.net = net
self.run_config = run_config
self.mix_prec = mix_prec
self.best_acc = 0
self.start_epoch = 0
os.makedirs(self.path, exist_ok=True)
if torch.cuda.is_available() and (not no_gpu):
self.device = torch.device('cuda:0')
self.net = self.net.to(self.device)
cudnn.benchmark = True
else:
self.device = torch.device('cpu')
if init:
self.network.init_model(run_config.model_init)
net_info = get_net_info(self.net, self.run_config.data_provider.data_shape, measure_latency, True)
with open('%s/net_info.txt' % self.path, 'w') as fout:
fout.write(json.dumps(net_info, indent=4) + '\n')
try:
fout.write(self.network.module_str)
except Exception:
pass
if isinstance(self.run_config.mixup_alpha, float):
self.train_criterion = cross_entropy_loss_with_soft_target
elif self.run_config.label_smoothing > 0:
self.train_criterion = lambda pred, target: \
cross_entropy_with_label_smoothing(pred, target, self.run_config.label_smoothing)
else:
self.train_criterion = nn.CrossEntropyLoss()
self.test_criterion = nn.CrossEntropyLoss()
if self.run_config.no_decay_keys:
keys = self.run_config.no_decay_keys.split('#')
net_params = [
self.network.get_parameters(keys, mode='exclude'),
self.network.get_parameters(keys, mode='include'),
]
else:
try:
net_params = self.network.weight_parameters()
except Exception:
net_params = self.network.parameters()
self.optimizer = self.run_config.build_optimizer(net_params)
if mix_prec is not None:
from apex import amp
self.network, self.optimizer = amp.initialize(self.network, self.optimizer, opt_level=mix_prec)
self.net = torch.nn.DataParallel(self.net)
def init_FL(self, flag_reset_running_statistics):
if self.run_config.flag_FL:
self.nets_FL = []
self.optimizers_FL = []
for _ in range(self.run_config.size_FL):
self.nets_FL.append(copy.deepcopy(self.net))
if flag_reset_running_statistics:
self.reset_running_statistics(self.network_FL(_), _)
for _ in range(self.run_config.size_FL):
if self.run_config.no_decay_keys:
keys = self.run_config.no_decay_keys.split('#')
net_params = [
self.network_FL(_).get_parameters(keys, mode='exclude'),
self.network_FL(_).get_parameters(keys, mode='include'),
]
else:
try:
net_params = self.network_FL(_).weight_parameters()
except Exception:
net_params = self.network_FL(_).parameters()
self.optimizers_FL.append(self.run_config.build_optimizer(net_params))
@property
def save_path(self):
if self.__dict__.get('_save_path', None) is None:
save_path = os.path.join(self.path, 'checkpoint')
os.makedirs(save_path, exist_ok=True)
self.__dict__['_save_path'] = save_path
return self.__dict__['_save_path']
@property
def logs_path(self):
if self.__dict__.get('_logs_path', None) is None:
logs_path = os.path.join(self.path, 'logs')
os.makedirs(logs_path, exist_ok=True)
self.__dict__['_logs_path'] = logs_path
return self.__dict__['_logs_path']
def network_FL(self, _):
if isinstance(self.nets_FL[_], nn.DataParallel):
return self.nets_FL[_].module
else:
return self.nets_FL[_]
@property
def network(self):
if isinstance(self.net, nn.DataParallel):
return self.net.module
else:
return self.net
@network.setter
def network(self, new_val):
if isinstance(self.net, nn.DataParallel):
self.net.module = new_val
else:
self.net = new_val
def write_log(self, log_str, prefix='valid', should_print=True):
if prefix in ['valid', 'test']:
with open(os.path.join(self.logs_path, 'valid_console.txt'), 'a') as fout:
fout.write(log_str + '\n')
fout.flush()
if prefix in ['valid', 'test', 'train']:
with open(os.path.join(self.logs_path, 'train_console.txt'), 'a') as fout:
if prefix in ['valid', 'test']:
fout.write('=' * 10)
fout.write(log_str + '\n')
fout.flush()
else:
with open(os.path.join(self.logs_path, '%s.txt' % prefix), 'a') as fout:
fout.write(log_str + '\n')
fout.flush()
if should_print:
print(log_str)
def save_model(self, checkpoint=None, is_best=False, model_name=None):
if checkpoint is None:
checkpoint = {'state_dict': self.network.state_dict()}
if model_name is None:
model_name = 'checkpoint.pth.tar'
if self.mix_prec is not None:
from apex import amp
checkpoint['amp'] = amp.state_dict()
checkpoint['dataset'] = self.run_config.dataset
latest_fname = os.path.join(self.save_path, 'latest.txt')
model_path = os.path.join(self.save_path, model_name)
with open(latest_fname, 'w') as fout:
fout.write(model_path + '\n')
torch.save(checkpoint, model_path)
if is_best:
best_path = os.path.join(self.save_path, 'model_best.pth.tar')
torch.save({'state_dict': checkpoint['state_dict']}, best_path)
def load_model(self, model_fname=None):
latest_fname = os.path.join(self.save_path, 'latest.txt')
if model_fname is None and os.path.exists(latest_fname):
with open(latest_fname, 'r') as fin:
model_fname = fin.readline()
if model_fname[-1] == '\n':
model_fname = model_fname[:-1]
try:
if model_fname is None or not os.path.exists(model_fname):
model_fname = '%s/checkpoint.pth.tar' % self.save_path
with open(latest_fname, 'w') as fout:
fout.write(model_fname + '\n')
print("=> loading checkpoint '{}'".format(model_fname))
if torch.cuda.is_available():
checkpoint = torch.load(model_fname)
else:
checkpoint = torch.load(model_fname, map_location='cpu')
self.network.load_state_dict(checkpoint['state_dict'])
if 'epoch' in checkpoint:
self.start_epoch = checkpoint['epoch'] + 1
if 'best_acc' in checkpoint:
self.best_acc = checkpoint['best_acc']
if 'optimizer' in checkpoint:
self.optimizer.load_state_dict(checkpoint['optimizer'])
if self.mix_prec is not None and 'amp' in checkpoint:
from apex import amp
amp.load_state_dict(checkpoint['amp'])
print("=> loaded checkpoint '{}'".format(model_fname))
except Exception:
print('fail to load checkpoint from %s' % self.save_path)
def save_config(self):
net_save_path = os.path.join(self.path, 'net.config')
json.dump(self.network.config, open(net_save_path, 'w'), indent=4)
print('Network configs dump to %s' % net_save_path)
run_save_path = os.path.join(self.path, 'run.config')
json.dump(self.run_config.config, open(run_save_path, 'w'), indent=4)
print('Run configs dump to %s' % run_save_path)
def validate(self, epoch=0, is_test=True, run_str='', net=None, data_loader=None, no_logs=False):
if net is None:
net = self.net
if not isinstance(net, nn.DataParallel):
net = nn.DataParallel(net)
if data_loader is None:
if is_test:
data_loader = self.run_config.test_loader
else:
data_loader = self.run_config.valid_loader
net.eval()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
with torch.no_grad():
with tqdm(total=len(data_loader),
desc='Validate Epoch #{} {}'.format(epoch + 1, run_str), disable=no_logs) as t:
for i, (images, labels) in enumerate(data_loader):
images, labels = images.to(self.device), labels.to(self.device)
output = net(images)
loss = self.test_criterion(output, labels.long())
acc1 = accuracy(output, labels, topk=(1,))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0].item(), images.size(0))
t.set_postfix({
'loss': losses.avg,
'top1': top1.avg,
'img_size': images.size(2),
})
t.update(1)
return losses.avg, top1.avg
def validate_all_resolution(self, epoch=0, is_test=True, net=None):
if net is None:
net = self.network
if isinstance(self.run_config.data_provider.image_size, list):
img_size_list, loss_list, top1_list, top5_list = [], [], [], []
for img_size in self.run_config.data_provider.image_size:
img_size_list.append(img_size)
self.run_config.data_provider.assign_active_img_size(img_size)
if not self.run_config.flag_FL:
self.reset_running_statistics(net=net)
else:
self.reset_running_statistics(net=None, tag_FL=self.run_config.size_FL)
loss, top1 = self.validate(epoch, is_test, net=net)
loss_list.append(loss)
top1_list.append(top1)
return img_size_list, loss_list, top1_list
else:
loss, top1 = self.validate(epoch, is_test, net=net)
return [self.run_config.data_provider.active_img_size], [loss], [top1]
def train_one_epoch(self, args, epoch, warmup_epochs=0, warmup_lr=0, tag_FL=-1):
if tag_FL >= 0:
self.nets_FL[tag_FL].train()
else:
self.net.train()
if tag_FL >= 0:
data_loader = self.run_config.train_FL_loader(tag_FL)
else:
data_loader = self.run_config.train_loader
nBatch = len(data_loader)
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
data_time = AverageMeter()
with tqdm(total=nBatch,
desc='Train Epoch #{}'.format(epoch + 1)) as t:
end = time.time()
for i, (images, labels) in enumerate(data_loader):
data_time.update(time.time() - end)
if tag_FL >= 0:
if epoch < warmup_epochs:
new_lr = self.run_config.warmup_adjust_learning_rate(
self.optimizers_FL[tag_FL], warmup_epochs * nBatch, nBatch, epoch, i, warmup_lr,
)
else:
new_lr = self.run_config.adjust_learning_rate(self.optimizers_FL[tag_FL], epoch - warmup_epochs, i, nBatch)
else:
if epoch < warmup_epochs:
new_lr = self.run_config.warmup_adjust_learning_rate(
self.optimizer, warmup_epochs * nBatch, nBatch, epoch, i, warmup_lr,
)
else:
new_lr = self.run_config.adjust_learning_rate(self.optimizer, epoch - warmup_epochs, i, nBatch)
images, labels = images.to(self.device), labels.to(self.device)
target = labels
if args.teacher_model is not None:
args.teacher_model.train()
with torch.no_grad():
soft_logits = args.teacher_model(images).detach()
soft_label = F.softmax(soft_logits, dim=1)
if isinstance(self.network, torchvision.models.Inception3):
if tag_FL >= 0:
output, aux_outputs = self.nets_FL[tag_FL](images)
else:
output, aux_outputs = self.net(images)
loss1 = self.train_criterion(output, labels.long())
loss2 = self.train_criterion(aux_outputs, labels.long())
loss = loss1 + 0.4 * loss2
else:
if tag_FL >= 0:
output = self.nets_FL[tag_FL](images)
else:
output = self.net(images)
loss = self.train_criterion(output, labels.long())
if args.teacher_model is None:
loss_type = 'ce'
else:
if args.kd_type == 'ce':
kd_loss = cross_entropy_loss_with_soft_target(output, soft_label)
else:
kd_loss = F.mse_loss(output, soft_logits)
loss = args.kd_ratio * kd_loss + loss
loss_type = '%.1fkd-%s & ce' % (args.kd_ratio, args.kd_type)
if tag_FL >= 0:
self.nets_FL[tag_FL].zero_grad()
else:
self.net.zero_grad()
if self.mix_prec is not None:
from apex import amp
if tag_FL >= 0:
with amp.scale_loss(loss, self.optimizers_FL[tag_FL]) as scaled_loss:
scaled_loss.backward()
else:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
if tag_FL >= 0:
self.optimizers_FL[tag_FL].step()
else:
self.optimizer.step()
acc1 = accuracy(output, target, topk=(1,))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0].item(), images.size(0))
t.set_postfix({
'loss': losses.avg,
'top1': top1.avg,
'img_size': images.size(2),
'lr': new_lr,
'loss_type': loss_type,
'data_time': data_time.avg,
})
t.update(1)
end = time.time()
return losses.avg, top1.avg
def FedAvg(self):
if self.run_config.flag_FL:
with torch.no_grad():
base_state = self.network.state_dict()
all_states = []
for _ in range(self.run_config.size_FL):
model = self.network_FL(_)
all_states.append(model.state_dict())
for name in base_state:
for _ in range(self.run_config.size_FL):
tmp_state = (all_states[_][name] / self.run_config.size_FL) if _ == 0 else \
tmp_state + (all_states[_][name] / self.run_config.size_FL)
base_state[name].copy_(tmp_state)
self.network.load_state_dict(base_state)
for _ in range(self.run_config.size_FL):
self.network_FL(_).load_state_dict(base_state)
def train(self, args, warmup_epoch=0, warmup_lr=0, flag_reset_running_statistics=False):
self.init_FL(flag_reset_running_statistics)
for epoch in range(self.start_epoch, self.run_config.n_epochs + warmup_epoch):
if not self.run_config.flag_FL:
train_loss, train_top1 = self.train_one_epoch(args, epoch, warmup_epoch, warmup_lr)
else:
train_loss, train_top1 = [], []
for _ in range(self.run_config.size_FL):
loss, top1 = self.train_one_epoch(args, epoch, warmup_epoch, warmup_lr, _)
train_loss.append(loss)
train_top1.append(top1)
train_loss = np.mean(train_loss)
train_top1 = np.mean(train_top1)
self.FedAvg()
if (epoch + 1) % self.run_config.validation_frequency == 0:
img_size, val_loss, val_acc = self.validate_all_resolution(epoch=epoch, is_test=False)
is_best = np.mean(val_acc) > self.best_acc
self.best_acc = max(self.best_acc, np.mean(val_acc))
val_log = 'Valid [{0}/{1}]\tloss {2:.3f}\ttop-1 acc {3:.3f} ({4:.3f})'. \
format(epoch + 1 - warmup_epoch, self.run_config.n_epochs,
np.mean(val_loss), np.mean(val_acc), self.best_acc)
val_log += '\tTrain top-1 {top1:.3f}\tloss {train_loss:.3f}\t'. \
format(top1=train_top1, train_loss=train_loss)
for i_s, v_a in zip(img_size, val_acc):
val_log += '(%d, %.3f), ' % (i_s, v_a)
self.write_log(val_log, prefix='valid', should_print=False)
else:
is_best = False
self.save_model({
'epoch': epoch,
'best_acc': self.best_acc,
'optimizer': self.optimizer.state_dict(),
'state_dict': self.network.state_dict(),
}, is_best=is_best)
return self.network
def reset_running_statistics(self, net=None, tag_FL=-1):
from ofa.elastic_nn.utils import set_running_statistics
if tag_FL == -1:
if net is None:
net = self.network
sub_train_loader = self.run_config.random_sub_train_loader(2000, 100)
set_running_statistics(net, sub_train_loader)
elif tag_FL == self.run_config.size_FL:
if not self.run_config.flag_FL:
print('Wrong FL client ID')
import sys
sys.exit()
for _ in range(tag_FL):
self.reset_running_statistics(self.network_FL(_), _)
self.FedAvg()
else:
if tag_FL < 0 or tag_FL >= self.run_config.size_FL or not self.run_config.flag_FL:
print('Wrong FL client ID')
import sys
sys.exit()
if net is None:
net = self.network_FL(tag_FL)
sub_train_loader = self.run_config.random_sub_train_loader(2000, 100, tag_FL=tag_FL)
set_running_statistics(net, sub_train_loader)
| true | true |
f71efda83eac0f41becf83c63eefaf707a80c70d | 1,677 | py | Python | junior_class/chapter-7-Recommendation_System/code/examples/7-5-1-read-feature.py | wwhio/awesome-DeepLearning | 2cc92edcf0c22bdfc670c537cc819c8fadf33fac | [
"Apache-2.0"
] | 1,150 | 2021-06-01T03:44:21.000Z | 2022-03-31T13:43:42.000Z | junior_class/chapter-7-Recommendation_System/code/examples/7-5-1-read-feature.py | wwhio/awesome-DeepLearning | 2cc92edcf0c22bdfc670c537cc819c8fadf33fac | [
"Apache-2.0"
] | 358 | 2021-06-01T03:58:47.000Z | 2022-03-28T02:55:00.000Z | junior_class/chapter-7-Recommendation_System/code/examples/7-5-1-read-feature.py | wwhio/awesome-DeepLearning | 2cc92edcf0c22bdfc670c537cc819c8fadf33fac | [
"Apache-2.0"
] | 502 | 2021-05-31T12:52:14.000Z | 2022-03-31T02:51:41.000Z | # copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# unzip -o data/save_feature_v1.zip -d /home/aistudio/
import pickle
import numpy as np
mov_feat_dir = 'mov_feat.pkl'
usr_feat_dir = 'usr_feat.pkl'
usr_feats = pickle.load(open(usr_feat_dir, 'rb'))
mov_feats = pickle.load(open(mov_feat_dir, 'rb'))
usr_id = 2
usr_feat = usr_feats[str(usr_id)]
mov_id = 1
# 通过电影ID索引到电影特征
mov_feat = mov_feats[str(mov_id)]
# 电影特征的路径
movie_data_path = "./ml-1m/movies.dat"
mov_info = {}
# 打开电影数据文件,根据电影ID索引到电影信息
with open(movie_data_path, 'r', encoding="ISO-8859-1") as f:
data = f.readlines()
for item in data:
item = item.strip().split("::")
mov_info[str(item[0])] = item
usr_file = "./ml-1m/users.dat"
usr_info = {}
# 打开文件,读取所有行到data中
with open(usr_file, 'r') as f:
data = f.readlines()
for item in data:
item = item.strip().split("::")
usr_info[str(item[0])] = item
print("当前的用户是:")
print("usr_id:", usr_id, usr_info[str(usr_id)])
print("对应的特征是:", usr_feats[str(usr_id)])
print("\n当前电影是:")
print("mov_id:", mov_id, mov_info[str(mov_id)])
print("对应的特征是:")
print(mov_feat)
| 27.048387 | 74 | 0.700656 |
import pickle
import numpy as np
mov_feat_dir = 'mov_feat.pkl'
usr_feat_dir = 'usr_feat.pkl'
usr_feats = pickle.load(open(usr_feat_dir, 'rb'))
mov_feats = pickle.load(open(mov_feat_dir, 'rb'))
usr_id = 2
usr_feat = usr_feats[str(usr_id)]
mov_id = 1
mov_feat = mov_feats[str(mov_id)]
movie_data_path = "./ml-1m/movies.dat"
mov_info = {}
with open(movie_data_path, 'r', encoding="ISO-8859-1") as f:
data = f.readlines()
for item in data:
item = item.strip().split("::")
mov_info[str(item[0])] = item
usr_file = "./ml-1m/users.dat"
usr_info = {}
with open(usr_file, 'r') as f:
data = f.readlines()
for item in data:
item = item.strip().split("::")
usr_info[str(item[0])] = item
print("当前的用户是:")
print("usr_id:", usr_id, usr_info[str(usr_id)])
print("对应的特征是:", usr_feats[str(usr_id)])
print("\n当前电影是:")
print("mov_id:", mov_id, mov_info[str(mov_id)])
print("对应的特征是:")
print(mov_feat)
| true | true |
f71efe24cfefd8f04e8fa9c7fd40b5e0927fd058 | 2,592 | py | Python | tests/test_nlp.py | dhimmel/sematch | 7e92b171c27a8b25e844a467554fe4bb2adfb883 | [
"Apache-2.0"
] | 397 | 2015-05-30T11:02:28.000Z | 2022-03-09T01:39:31.000Z | tests/test_nlp.py | dhimmel/sematch | 7e92b171c27a8b25e844a467554fe4bb2adfb883 | [
"Apache-2.0"
] | 32 | 2015-04-27T21:26:29.000Z | 2021-08-19T10:20:45.000Z | tests/test_nlp.py | dhimmel/sematch | 7e92b171c27a8b25e844a467554fe4bb2adfb883 | [
"Apache-2.0"
] | 110 | 2015-11-06T17:01:48.000Z | 2022-02-17T05:09:02.000Z |
def test_extraction():
from sematch.nlp import Extraction
from sematch.semantic.sparql import EntityFeatures
upm = EntityFeatures().features('http://dbpedia.org/resource/Technical_University_of_Madrid')
extract = Extraction()
assert extract.extract_nouns(upm['abstract']) is not None
assert extract.extract_verbs(upm['abstract']) is not None
assert extract.extract_chunks_doc(upm['abstract']) is not None
cats = extract.category_features(upm['category'])
assert extract.category2words(cats) is not None
def test_rake():
from sematch.nlp import RAKE
from sematch.semantic.sparql import EntityFeatures
upm = EntityFeatures().features('http://dbpedia.org/resource/Technical_University_of_Madrid')
rake = RAKE()
assert rake.extract(upm['abstract']) is not None
def test_TFIDF():
corpus = ['This is the first document.','This is the second second document.','And the third one.','Is this the first document?',]
from sematch.nlp import TFIDF
tfidf = TFIDF(corpus)
assert tfidf.idf('document') is not None
assert tfidf.tfidf('I need a document and second') is not None
def test_Spacy():
from sematch.nlp import SpaCyNLP
sy = SpaCyNLP()
print sy.pos_tag(u'This is the second second document.')
def test_feature_extractor():
from sematch.nlp import FeatureExtractor
from sematch.nlp import EntityFeature
from sematch.nlp import SpaCyNLP
from sematch.utility import FileIO
import itertools
sy = SpaCyNLP()
w_extractor = FeatureExtractor(sy.pos_tag)
features = EntityFeature.load(feature_dict_file='models/query_features.json')
query = FileIO.read_json_file('dataset/ned/query_ned_cleaned.txt')
candidates = list(itertools.chain.from_iterable(map(lambda x: x['candidate'], query)))
set_candidates = list(set(candidates))
for can in set_candidates[:10]:
print w_extractor.entity_word_features([can], features)
def test_entity_feature():
from sematch.utility import FileIO
from sematch.nlp import EntityFeature
query = FileIO.read_json_file('dataset/ned/query_ned_cleaned.txt')
question = FileIO.read_json_file('dataset/ned/question_ned_cleaned.txt')
tweet = FileIO.read_json_file('dataset/ned/tweet_ned_cleaned.txt')
import itertools
candidates = list(itertools.chain.from_iterable(map(lambda x:x['candidate'], question)))
set_candidates = list(set(candidates))
print len(set_candidates)
EntityFeature.candidate_features(set_candidates, export_file='models/question_features.json')
test_feature_extractor() | 39.272727 | 134 | 0.745756 |
def test_extraction():
from sematch.nlp import Extraction
from sematch.semantic.sparql import EntityFeatures
upm = EntityFeatures().features('http://dbpedia.org/resource/Technical_University_of_Madrid')
extract = Extraction()
assert extract.extract_nouns(upm['abstract']) is not None
assert extract.extract_verbs(upm['abstract']) is not None
assert extract.extract_chunks_doc(upm['abstract']) is not None
cats = extract.category_features(upm['category'])
assert extract.category2words(cats) is not None
def test_rake():
from sematch.nlp import RAKE
from sematch.semantic.sparql import EntityFeatures
upm = EntityFeatures().features('http://dbpedia.org/resource/Technical_University_of_Madrid')
rake = RAKE()
assert rake.extract(upm['abstract']) is not None
def test_TFIDF():
corpus = ['This is the first document.','This is the second second document.','And the third one.','Is this the first document?',]
from sematch.nlp import TFIDF
tfidf = TFIDF(corpus)
assert tfidf.idf('document') is not None
assert tfidf.tfidf('I need a document and second') is not None
def test_Spacy():
from sematch.nlp import SpaCyNLP
sy = SpaCyNLP()
print sy.pos_tag(u'This is the second second document.')
def test_feature_extractor():
from sematch.nlp import FeatureExtractor
from sematch.nlp import EntityFeature
from sematch.nlp import SpaCyNLP
from sematch.utility import FileIO
import itertools
sy = SpaCyNLP()
w_extractor = FeatureExtractor(sy.pos_tag)
features = EntityFeature.load(feature_dict_file='models/query_features.json')
query = FileIO.read_json_file('dataset/ned/query_ned_cleaned.txt')
candidates = list(itertools.chain.from_iterable(map(lambda x: x['candidate'], query)))
set_candidates = list(set(candidates))
for can in set_candidates[:10]:
print w_extractor.entity_word_features([can], features)
def test_entity_feature():
from sematch.utility import FileIO
from sematch.nlp import EntityFeature
query = FileIO.read_json_file('dataset/ned/query_ned_cleaned.txt')
question = FileIO.read_json_file('dataset/ned/question_ned_cleaned.txt')
tweet = FileIO.read_json_file('dataset/ned/tweet_ned_cleaned.txt')
import itertools
candidates = list(itertools.chain.from_iterable(map(lambda x:x['candidate'], question)))
set_candidates = list(set(candidates))
print len(set_candidates)
EntityFeature.candidate_features(set_candidates, export_file='models/question_features.json')
test_feature_extractor() | false | true |
f71efe60dc3fc3f41673c6e0e26600a1eb10b27b | 102,638 | py | Python | models/models.py | sharabeshj/course-editor-test | 9af15d10ef1f039fdf5758134a7cb72384ccf3f5 | [
"Apache-2.0"
] | 1 | 2021-01-06T17:58:30.000Z | 2021-01-06T17:58:30.000Z | models/models.py | priyankagohil/coursebuilder-assessment | 559e867a2a846dd773471c6bc76cf6005a57098f | [
"Apache-2.0"
] | null | null | null | models/models.py | priyankagohil/coursebuilder-assessment | 559e867a2a846dd773471c6bc76cf6005a57098f | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core data model classes."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import collections
import copy
import datetime
import logging
import os
import sys
import time
import webapp2
import jinja2
import config
import counters
from counters import PerfCounter
from entities import BaseEntity
from entities import delete
from entities import get
from entities import put
import data_removal
import messages
import services
import transforms
import appengine_config
from common import caching
from common import utils as common_utils
from common import users
from google.appengine.api import app_identity
from google.appengine.api import mail
from google.appengine.api import memcache
from google.appengine.api import namespace_manager
from google.appengine.api import taskqueue
from google.appengine.ext import db
# We want to use memcache for both objects that exist and do not exist in the
# datastore. If object exists we cache its instance, if object does not exist
# we cache this object below. (Negative-cache sentinel; compared with ==.)
NO_OBJECT = {}
# The default amount of time to cache the items for in memcache.
DEFAULT_CACHE_TTL_SECS = 60 * 5
# Size limits for a single value and for a multi-set batch; values above
# these are silently not cached (see MemcacheManager.set/set_multi).
# https://developers.google.com/appengine/docs/python/memcache/#Python_Limits
MEMCACHE_MAX = (1000 * 1000 - 96 - 250)
MEMCACHE_MULTI_MAX = 32 * 1000 * 1000
# Update frequency for Student.last_seen_on.
STUDENT_LAST_SEEN_ON_UPDATE_SEC = 24 * 60 * 60 # 1 day.
# Global memcache controls.
CAN_USE_MEMCACHE = config.ConfigProperty(
    'gcb_can_use_memcache', bool, messages.SITE_SETTINGS_MEMCACHE,
    default_value=appengine_config.PRODUCTION_MODE, label='Memcache')
# Performance counters for the shared (App Engine) memcache.
CACHE_PUT = PerfCounter(
    'gcb-models-cache-put',
    'A number of times an object was put into memcache.')
CACHE_PUT_TOO_BIG = PerfCounter(
    'gcb-models-cache-put-too-big',
    'Number of times an object was too big to put in memcache.')
CACHE_HIT = PerfCounter(
    'gcb-models-cache-hit',
    'A number of times an object was found in memcache.')
CACHE_MISS = PerfCounter(
    'gcb-models-cache-miss',
    'A number of times an object was not found in memcache.')
CACHE_DELETE = PerfCounter(
    'gcb-models-cache-delete',
    'A number of times an object was deleted from memcache.')
# Performance counters for the in-process (request-local readonly) cache.
CACHE_PUT_LOCAL = PerfCounter(
    'gcb-models-cache-put-local',
    'A number of times an object was put into local memcache.')
CACHE_HIT_LOCAL = PerfCounter(
    'gcb-models-cache-hit-local',
    'A number of times an object was found in local memcache.')
CACHE_MISS_LOCAL = PerfCounter(
    'gcb-models-cache-miss-local',
    'A number of times an object was not found in local memcache.')
# Intent for sending welcome notifications.
WELCOME_NOTIFICATION_INTENT = 'welcome'
class MemcacheManager(object):
    """Class that consolidates all memcache operations.

    Besides wrapping the App Engine memcache service, this class keeps an
    optional in-process cache that is active only between (re-entrant)
    begin_readonly()/end_readonly() calls; while active, gets are served
    from process memory and mutations (delete*) are disallowed.
    """
    # Request-local cache: {namespace: {key: value}}; non-None only while
    # in readonly mode.
    _LOCAL_CACHE = None
    _IS_READONLY = False
    # Re-entry depth for nested begin_readonly()/end_readonly() pairs.
    _READONLY_REENTRY_COUNT = 0
    # The app_context captured at begin_readonly(); readonly mode must not
    # span a change of course/app_context.
    _READONLY_APP_CONTEXT = None
    @classmethod
    def _is_same_app_context_if_set(cls):
        """True iff the current request's app_context matches the saved one."""
        if cls._READONLY_APP_CONTEXT is None:
            return True
        from controllers import sites
        app_context = sites.get_course_for_current_request()
        return cls._READONLY_APP_CONTEXT == app_context
    @classmethod
    def _assert_true_clear_cache_and_raise_if_not(cls, value_to_assert, msg):
        """Asserts a condition, resetting all readonly state on failure."""
        if not value_to_assert:
            cls.clear_readonly_cache()
            raise AssertionError(msg)
    @classmethod
    def _fs_begin_readonly(cls):
        """Captures the current app_context and puts its fs in readonly."""
        from controllers import sites
        cls._READONLY_APP_CONTEXT = sites.get_course_for_current_request()
        if cls._READONLY_APP_CONTEXT:
            cls._READONLY_APP_CONTEXT.fs.begin_readonly()
    @classmethod
    def _fs_end_readonly(cls):
        """Ends the fs readonly period and forgets the saved app_context."""
        if cls._READONLY_APP_CONTEXT:
            cls._READONLY_APP_CONTEXT.fs.end_readonly()
        cls._READONLY_APP_CONTEXT = None
    @classmethod
    def begin_readonly(cls):
        """Enters readonly mode (re-entrant); outermost call arms the cache."""
        cls._assert_true_clear_cache_and_raise_if_not(
            cls._READONLY_REENTRY_COUNT >= 0, 'Re-entry counter is < 0.')
        cls._assert_true_clear_cache_and_raise_if_not(
            cls._is_same_app_context_if_set(), 'Unable to switch app_context.')
        if cls._READONLY_REENTRY_COUNT == 0:
            appengine_config.log_appstats_event(
                'MemcacheManager.begin_readonly')
            cls._IS_READONLY = True
            cls._LOCAL_CACHE = {}
            cls._fs_begin_readonly()
        cls._READONLY_REENTRY_COUNT += 1
    @classmethod
    def end_readonly(cls):
        """Leaves readonly mode; outermost exit discards the local cache."""
        cls._assert_true_clear_cache_and_raise_if_not(
            cls._READONLY_REENTRY_COUNT > 0, 'Re-entry counter <= 0.')
        cls._assert_true_clear_cache_and_raise_if_not(
            cls._is_same_app_context_if_set(), 'Unable to switch app_context.')
        cls._READONLY_REENTRY_COUNT -= 1
        if cls._READONLY_REENTRY_COUNT == 0:
            cls._fs_end_readonly()
            cls._IS_READONLY = False
            cls._LOCAL_CACHE = None
            cls._READONLY_APP_CONTEXT = None
            appengine_config.log_appstats_event('MemcacheManager.end_readonly')
    @classmethod
    def clear_readonly_cache(cls):
        """Forcibly resets readonly state; used on assertion failures."""
        cls._LOCAL_CACHE = None
        cls._IS_READONLY = False
        cls._READONLY_REENTRY_COUNT = 0
        if cls._READONLY_APP_CONTEXT and (
            cls._READONLY_APP_CONTEXT.fs.is_in_readonly):
            cls._READONLY_APP_CONTEXT.fs.end_readonly()
        cls._READONLY_APP_CONTEXT = None
    @classmethod
    def _local_cache_get(cls, key, namespace):
        """Returns (was_cached, value) from the local cache, if active."""
        if cls._IS_READONLY:
            assert cls._is_same_app_context_if_set()
            _dict = cls._LOCAL_CACHE.get(namespace)
            # TODO(nretallack): change to: if _dict is None
            if not _dict:
                _dict = {}
                cls._LOCAL_CACHE[namespace] = _dict
            if key in _dict:
                CACHE_HIT_LOCAL.inc()
                value = _dict[key]
                return True, value
            else:
                CACHE_MISS_LOCAL.inc()
        return False, None
    @classmethod
    def _local_cache_put(cls, key, namespace, value):
        """Stores a value in the local cache, if active; no-op otherwise."""
        if cls._IS_READONLY:
            assert cls._is_same_app_context_if_set()
            _dict = cls._LOCAL_CACHE.get(namespace)
            # TODO(nretallack): change to: if _dict is None
            if not _dict:
                _dict = {}
                cls._LOCAL_CACHE[namespace] = _dict
            _dict[key] = value
            CACHE_PUT_LOCAL.inc()
    @classmethod
    def _local_cache_get_multi(cls, keys, namespace):
        """Returns (True, values) only when every key is locally cached."""
        if cls._IS_READONLY:
            assert cls._is_same_app_context_if_set()
            values = []
            for key in keys:
                is_cached, value = cls._local_cache_get(key, namespace)
                # One miss invalidates the whole batch result.
                if not is_cached:
                    return False, []
                else:
                    values.append(value)
            return True, values
        return False, []
    @classmethod
    def _local_cache_put_multi(cls, values, namespace):
        """Stores a dict of values in the local cache, if active."""
        if cls._IS_READONLY:
            assert cls._is_same_app_context_if_set()
            for key, value in values.items():
                cls._local_cache_put(key, namespace, value)
    @classmethod
    def get_namespace(cls):
        """Look up namespace from namespace_manager or use default."""
        namespace = namespace_manager.get_namespace()
        if namespace:
            return namespace
        return appengine_config.DEFAULT_NAMESPACE_NAME
    @classmethod
    def _get_namespace(cls, namespace):
        """Returns the explicit namespace, else the current/default one."""
        if namespace is not None:
            return namespace
        return cls.get_namespace()
    @classmethod
    def get(cls, key, namespace=None):
        """Gets an item from memcache if memcache is enabled."""
        if not CAN_USE_MEMCACHE.value:
            return None
        _namespace = cls._get_namespace(namespace)
        is_cached, value = cls._local_cache_get(key, _namespace)
        if is_cached:
            # Deep-copy so callers cannot mutate the cached instance.
            return copy.deepcopy(value)
        value = memcache.get(key, namespace=_namespace)
        # We store some objects in memcache that don't evaluate to True, but are
        # real objects, '{}' for example. Count a cache miss only in a case when
        # an object is None.
        if value is not None:
            CACHE_HIT.inc()
        else:
            CACHE_MISS.inc(context=key)
        cls._local_cache_put(key, _namespace, value)
        return copy.deepcopy(value)
    @classmethod
    def get_multi(cls, keys, namespace=None):
        """Gets a set of items from memcache if memcache is enabled."""
        if not CAN_USE_MEMCACHE.value:
            return {}
        _namespace = cls._get_namespace(namespace)
        is_cached, values = cls._local_cache_get_multi(keys, _namespace)
        if is_cached:
            return values
        values = memcache.get_multi(keys, namespace=_namespace)
        # NOTE(review): memcache.get_multi omits missing keys from its result
        # rather than mapping them to None, so the else branch below may
        # never fire for true misses — confirm against the memcache API.
        for key, value in values.items():
            if value is not None:
                CACHE_HIT.inc()
            else:
                logging.info('Cache miss, key: %s. %s', key, Exception())
                CACHE_MISS.inc(context=key)
        cls._local_cache_put_multi(values, _namespace)
        return values
    @classmethod
    def set(cls, key, value, ttl=DEFAULT_CACHE_TTL_SECS, namespace=None,
            propagate_exceptions=False):
        """Sets an item in memcache if memcache is enabled."""
        # Ensure subsequent mods to value do not affect the cached copy.
        value = copy.deepcopy(value)
        try:
            if CAN_USE_MEMCACHE.value:
                # Skip (but count) values too large for a memcache slot.
                size = sys.getsizeof(value)
                if size > MEMCACHE_MAX:
                    CACHE_PUT_TOO_BIG.inc()
                else:
                    CACHE_PUT.inc()
                    _namespace = cls._get_namespace(namespace)
                    memcache.set(key, value, ttl, namespace=_namespace)
                    cls._local_cache_put(key, _namespace, value)
        except:  # pylint: disable=bare-except
            if propagate_exceptions:
                raise
            else:
                # Caching is best-effort; failures are logged, not raised.
                logging.exception(
                    'Failed to set: %s, %s', key, cls._get_namespace(namespace))
                return None
    @classmethod
    def set_multi(cls, mapping, ttl=DEFAULT_CACHE_TTL_SECS, namespace=None):
        """Sets a dict of items in memcache if memcache is enabled."""
        try:
            if CAN_USE_MEMCACHE.value:
                if not mapping:
                    return
                # NOTE(review): sys.getsizeof is shallow, so this is only a
                # rough lower bound on the batch payload size.
                size = sum([
                    sys.getsizeof(key) + sys.getsizeof(value)
                    for key, value in mapping.items()])
                if size > MEMCACHE_MULTI_MAX:
                    CACHE_PUT_TOO_BIG.inc()
                else:
                    CACHE_PUT.inc()
                    _namespace = cls._get_namespace(namespace)
                    memcache.set_multi(mapping, time=ttl, namespace=_namespace)
                    cls._local_cache_put_multi(mapping, _namespace)
        except:  # pylint: disable=bare-except
            logging.exception(
                'Failed to set_multi: %s, %s',
                mapping, cls._get_namespace(namespace))
            return None
    @classmethod
    def delete(cls, key, namespace=None):
        """Deletes an item from memcache if memcache is enabled."""
        # Mutations are forbidden while a readonly period is in effect.
        assert not cls._IS_READONLY
        if CAN_USE_MEMCACHE.value:
            CACHE_DELETE.inc()
            memcache.delete(key, namespace=cls._get_namespace(namespace))
    @classmethod
    def delete_multi(cls, key_list, namespace=None):
        """Deletes a list of items from memcache if memcache is enabled."""
        assert not cls._IS_READONLY
        if CAN_USE_MEMCACHE.value:
            CACHE_DELETE.inc(increment=len(key_list))
            memcache.delete_multi(
                key_list, namespace=cls._get_namespace(namespace))
    @classmethod
    def incr(cls, key, delta, namespace=None):
        """Incr an item in memcache if memcache is enabled."""
        if CAN_USE_MEMCACHE.value:
            memcache.incr(
                key, delta,
                namespace=cls._get_namespace(namespace), initial_value=0)
# When enabled, per-instance counters are also aggregated app-wide via
# memcache (see incr_counter_global_value / get_counter_global_value).
CAN_AGGREGATE_COUNTERS = config.ConfigProperty(
    'gcb_can_aggregate_counters', bool,
    messages.SITE_SETTINGS_AGGREGATE_COUNTERS, default_value=False,
    label='Aggregate Counters')
def incr_counter_global_value(name, delta):
    """Increments the app-wide aggregate for a counter, if aggregation is on."""
    if not CAN_AGGREGATE_COUNTERS.value:
        return
    # Aggregates live under the default namespace so all courses share them.
    MemcacheManager.incr(
        'counter:' + name, delta,
        namespace=appengine_config.DEFAULT_NAMESPACE_NAME)
def get_counter_global_value(name):
    """Returns the app-wide aggregate for a counter, or None if disabled."""
    if not CAN_AGGREGATE_COUNTERS.value:
        return None
    # Aggregates live under the default namespace so all courses share them.
    return MemcacheManager.get(
        'counter:' + name,
        namespace=appengine_config.DEFAULT_NAMESPACE_NAME)
# Wire the counters module's global-aggregation hooks to our implementations.
counters.get_counter_global_value = get_counter_global_value
counters.incr_counter_global_value = incr_counter_global_value
# Retained only so existing stored settings keep parsing; no longer consulted.
DEPRECATED_CAN_SHARE_STUDENT_PROFILE = config.ConfigProperty(
    'gcb_can_share_student_profile', bool, '', default_value=False,
    deprecated=True)
class CollisionError(Exception):
    """Exception raised to show that a collision in a namespace has occurred."""
class ValidationError(Exception):
    """Exception raised to show that a validation failed."""
class ContentChunkEntity(BaseEntity):
    """Defines storage for ContentChunk, a blob of opaque content to display.

    Accessed through ContentChunkDAO; not intended for direct use.
    """
    _PROPERTY_EXPORT_BLACKLIST = []  # No PII in ContentChunks.
    # A string that gives the type of the content chunk. At the data layer we
    # make no restrictions on the values that can be used here -- we only
    # require that a type is given. The type here may be independent of any
    # notion of Content-Type in an HTTP header.
    content_type = db.StringProperty(required=True)
    # UTC last modification timestamp.
    last_modified = db.DateTimeProperty(auto_now=True, required=True)
    # Whether or not the chunk supports custom tags. If True, the renderer may
    # be extended to parse and render those tags at display time (this is a stub
    # for future functionality that does not exist yet). If False, the contents
    # of the chunk will be rendered verbatim.
    supports_custom_tags = db.BooleanProperty(default=False)
    # Optional identifier for the chunk in the system it was sourced from.
    # Format is type_id:resource_id where type_id is an identifier that maps to
    # an external system and resource_id is the identifier for a resource within
    # that system (e.g. 'drive:1234' or 'web:http://example.com/index.html').
    # Exact values are up to the caller, but if either type_id or resource_id is
    # given, both must be, they must both be truthy, and type_id cannot contain
    # ':'. Max size is 500B, enforced by datastore.
    uid = db.StringProperty(indexed=True)
    # Payload of the chunk. Max size is 1MB, enforced by datastore.
    contents = db.TextProperty()
class ContentChunkDAO(object):
    """Data access object for ContentChunks.

    Callers exchange ContentChunkDTO instances; entities never escape this
    class. Single-item reads are memcached, including negative results
    (via the NO_OBJECT sentinel).
    """
    @classmethod
    def delete(cls, entity_id):
        """Deletes ContentChunkEntity for datastore id int; returns None."""
        memcache_key = cls._get_memcache_key(entity_id)
        entity = ContentChunkEntity.get_by_id(entity_id)
        if entity:
            delete(entity)
        # Evict unconditionally: a cached NO_OBJECT may also be stale.
        MemcacheManager.delete(memcache_key)
    @classmethod
    def get(cls, entity_id):
        """Gets ContentChunkEntityDTO or None from given datastore id int."""
        if entity_id is None:
            return
        memcache_key = cls._get_memcache_key(entity_id)
        found = MemcacheManager.get(memcache_key)
        # NO_OBJECT marks ids already known to have no datastore entity.
        if found == NO_OBJECT:
            return None
        elif found:
            return found
        else:
            result = None
            cache_value = NO_OBJECT
            entity = ContentChunkEntity.get_by_id(entity_id)
            if entity:
                result = cls._make_dto(entity)
                cache_value = result
            MemcacheManager.set(memcache_key, cache_value)
            return result
    @classmethod
    def get_by_uid(cls, uid):
        """Gets list of DTOs for all entities with given uid string."""
        # Uncached; hits the datastore directly and sorts by entity id.
        results = ContentChunkEntity.all().filter(
            ContentChunkEntity.uid.name, uid
        ).fetch(1000)
        return sorted(
            [cls._make_dto(result) for result in results],
            key=lambda dto: dto.id)
    @classmethod
    def get_or_new_by_uid(cls, uid):
        """Gets the first DTO matching uid, or a new unsaved DTO for it."""
        result = cls.get_one_by_uid(uid)
        if result is not None:
            return result
        else:
            type_id, resource_id = cls._split_uid(uid)
            return ContentChunkDTO({
                'type_id': type_id,
                'resource_id': resource_id,
            })
    @classmethod
    def get_one_by_uid(cls, uid):
        """Gets the oldest DTO matching uid, or None if there are none."""
        matches = cls.get_by_uid(uid)
        if matches:
            # There is a data race in the DAO -- it's possible to create two
            # entries at the same time with the same UID. If that happened,
            # use the first one saved.
            return matches[0]
        else:
            return None
    @classmethod
    def make_uid(cls, type_id, resource_id):
        """Makes a uid string (or None) from the given strings (or Nones)."""
        if type_id is None and resource_id is None:
            return None
        # Both halves must be present and truthy; ':' separates them, so
        # type_id may not contain it (resource_id may).
        assert type_id and resource_id and ':' not in type_id
        return '%s:%s' % (type_id, resource_id)
    @classmethod
    def save(cls, dto):
        """Saves content of DTO and returns the key of the saved entity.
        Handles both creating new and updating existing entities. If the id of a
        passed DTO is found, the entity will be updated; otherwise, the entity
        will be created.
        Note that this method does not refetch the saved entity from the
        datastore after put since this is impossible in a transaction. This
        means the last_modified date we put in the cache skews from the actual
        saved value by however long put took. This is expected datastore
        behavior; we do not at present have a use case for perfect accuracy in
        this value for our getters.
        Args:
            dto: ContentChunkDTO. DTO to save. Its last_modified field is
                ignored.
        Returns:
            db.Key of saved ContentChunkEntity.
        """
        return cls.save_all([dto])[0]
    @classmethod
    def save_all(cls, dtos):
        """Saves all given DTOs; see save() for semantics.
        Args:
            dtos: list of ContentChunkDTO. The last_modified field is ignored.
        Returns:
            List of db.Key of saved ContentChunkEntities, in order of dto input.
        """
        entities = []
        for dto in dtos:
            if dto.id is None:
                entity = ContentChunkEntity(content_type=dto.content_type)
            else:
                entity = ContentChunkEntity.get_by_id(dto.id)
                # DTO refers to an id that no longer exists: recreate.
                if entity is None:
                    entity = ContentChunkEntity(content_type=dto.content_type)
            entity.content_type = dto.content_type
            entity.contents = dto.contents
            entity.supports_custom_tags = dto.supports_custom_tags
            entity.uid = cls.make_uid(dto.type_id, dto.resource_id)
            entities.append(entity)
        db.put(entities)
        # Invalidate rather than refresh; see save() docstring for why.
        for entity in entities:
            MemcacheManager.delete(cls._get_memcache_key(entity.key().id()))
        return [entity.key() for entity in entities]
    @classmethod
    def _get_memcache_key(cls, entity_id):
        """Builds the memcache key for a chunk's datastore id."""
        assert entity_id is not None
        return '(%s:%s)' % (ContentChunkEntity.kind(), entity_id)
    @classmethod
    def _make_dto(cls, entity):
        """Converts a ContentChunkEntity into a ContentChunkDTO."""
        type_id, resource_id = cls._split_uid(entity.uid)
        return ContentChunkDTO({
            'content_type': entity.content_type,
            'contents': entity.contents,
            'id': entity.key().id(),
            'last_modified': entity.last_modified,
            'resource_id': resource_id,
            'supports_custom_tags': entity.supports_custom_tags,
            'type_id': type_id,
        })
    @classmethod
    def _split_uid(cls, uid):
        """Splits 'type_id:resource_id' into its parts; (None, None) if None."""
        resource_id = None
        type_id = None
        if uid is not None:
            assert ':' in uid
            type_id, resource_id = uid.split(':', 1)
            assert type_id and resource_id
        return type_id, resource_id
class ContentChunkDTO(object):
    """Data transfer object for ContentChunks.

    Mirrors the fields of ContentChunkEntity, with the entity's uid split
    into its type_id and resource_id halves (see ContentChunkDAO._make_dto).
    Compares by value across all fields.
    """

    def __init__(self, entity_dict):
        """Populates the DTO from a dict of field values.

        Args:
            entity_dict: dict. Any missing key defaults the field to None.
        """
        self.content_type = entity_dict.get('content_type')
        self.contents = entity_dict.get('contents')
        self.id = entity_dict.get('id')
        self.last_modified = entity_dict.get('last_modified')
        self.resource_id = entity_dict.get('resource_id')
        self.supports_custom_tags = entity_dict.get('supports_custom_tags')
        self.type_id = entity_dict.get('type_id')

    def __eq__(self, other):
        return (
            isinstance(other, ContentChunkDTO) and
            self.content_type == other.content_type and
            self.contents == other.contents and
            self.id == other.id and
            self.last_modified == other.last_modified and
            self.resource_id == other.resource_id and
            self.supports_custom_tags == other.supports_custom_tags and
            self.type_id == other.type_id)

    def __ne__(self, other):
        # Bug fix: in Python 2, != is NOT derived from __eq__, so without
        # this two field-equal DTOs would still compare unequal via !=.
        return not self.__eq__(other)
class PersonalProfile(BaseEntity):
    """Personal information not specific to any course instance.

    Keyed by user_id (the entity key name); lives in the default namespace
    so it is shared across all courses.
    """
    email = db.StringProperty(indexed=False)
    legal_name = db.StringProperty(indexed=False)
    nick_name = db.StringProperty(indexed=False)
    date_of_birth = db.DateProperty(indexed=False)
    # JSON-serialized dict of per-course-namespace info (final_grade, etc.).
    course_info = db.TextProperty()
    # PII fields excluded from data export.
    _PROPERTY_EXPORT_BLACKLIST = [email, legal_name, nick_name, date_of_birth]
    @property
    def user_id(self):
        # The entity's key name is the user_id.
        return self.key().name()
    @classmethod
    def safe_key(cls, db_key, transform_fn):
        """Returns a key with the user_id (key name) transformed for export."""
        return db.Key.from_path(cls.kind(), transform_fn(db_key.name()))
class PersonalProfileDTO(object):
    """Plain data holder mirroring a PersonalProfile entity.

    Note: when constructed without an entity, only course_info is set;
    the remaining attributes are absent, matching historical behavior.
    """

    def __init__(self, personal_profile=None):
        # Default serialized course_info; overwritten when an entity is given.
        self.course_info = '{}'
        if not personal_profile:
            return
        self.user_id = personal_profile.user_id
        self.email = personal_profile.email
        self.legal_name = personal_profile.legal_name
        self.nick_name = personal_profile.nick_name
        self.date_of_birth = personal_profile.date_of_birth
        self.course_info = personal_profile.course_info
# Number of no-progress retries on the student lifecycle queue before
# emailing site admins (see StudentLifecycleObserver.post).
QUEUE_RETRIES_BEFORE_SENDING_MAIL = config.ConfigProperty(
    'gcb_lifecycle_queue_retries_before_sending_mail', int,
    messages.SITE_SETTINGS_QUEUE_NOTIFICATION, default_value=10,
    label='Queue Notification',
    validator=config.ValidateIntegerRange(1, 50).validate)
class StudentLifecycleObserver(webapp2.RequestHandler):
    """Provides notification on major events to Students for interested modules.
    Notification is done from an App Engine deferred work queue. This is done
    so that observers who _absolutely_ _positively_ _have_ _to_ _be_ _notified_
    are either going to be notified, or a site administrator is going to get
    an ongoing sequence of email notifications that Something Is Wrong, and
    will then address the problem manually.
    Modules can register to be called back for the lifecycle events listed
    below. Callbacks should be registered like this:
    models.StudentLifecycleObserver.EVENT_CALLBACKS[
        models.StudentLifecycleObserver.EVENT_ADD]['my_module'] = my_handler
    If a callback function needs to have extra information passed to it that
    needs to be collected in the context where the lifecycle event is actually
    happening, modules can register a function in the ENQUEUE_CALLBACKS. Add
    one of those in the same way as for event callbacks:
    models.StudentLifecycleObserver.ENQUEUE_CALLBACKS[
        models.StudentLifecycleObserver.EVENT_ADD]['my_module'] = a_function
    Event notification callbacks are called repeatedly until they return
    without raising an exception. Note that due to retries having an
    exponential backoff (to a maximum of two hours), you cannot rely on
    notifications being delivered in any particular order relative to one
    another.
    Event notification callbacks must take two or three parameters:
    - the user_id (a string)
    - the datetime.datetime UTC timestamp when the event originally occurred.
    - If-and-only-if the the enqueue callback was registered and returned a
      non-None value, this argument is passed. If this value is mutable, any
      changes made by the event callback will be retained in any future
      re-tries.
    Enqueue callback functions must take exactly one parameter:
    - the user_id of the user to which the event pertains.
      The function may return any data type which is convertible to a JSON
      string using transforms.dumps(). This value is passed to the event
      notification callback.
    """
    QUEUE_NAME = 'user-lifecycle'
    URL = '/_ah/queue/' + QUEUE_NAME
    # Supported lifecycle event names.
    EVENT_ADD = 'add'
    EVENT_UNENROLL = 'unenroll'
    EVENT_UNENROLL_COMMANDED = 'unenroll_commanded'
    EVENT_REENROLL = 'reenroll'
    # {event: {module_name: callback}} registries; see class docstring.
    EVENT_CALLBACKS = {
        EVENT_ADD: {},
        EVENT_UNENROLL: {},
        EVENT_UNENROLL_COMMANDED: {},
        EVENT_REENROLL: {},
    }
    ENQUEUE_CALLBACKS = {
        EVENT_ADD: {},
        EVENT_UNENROLL: {},
        EVENT_UNENROLL_COMMANDED: {},
        EVENT_REENROLL: {},
    }
    @classmethod
    def enqueue(cls, event, user_id, transactional=True):
        """Queues a lifecycle event for all currently-registered callbacks."""
        if event not in cls.EVENT_CALLBACKS:
            raise ValueError('Event "%s" not in allowed list: %s' % (
                event, ' '.join(cls.EVENT_CALLBACKS)))
        if not user_id:
            raise ValueError('User ID must be non-blank')
        if not cls.EVENT_CALLBACKS[event]:
            return  # No callbacks registered for event -> nothing to do; skip.
        # Collect caller-context data now; it is serialized onto the task.
        extra_data = {}
        for name, callback in cls.ENQUEUE_CALLBACKS[event].iteritems():
            extra_data[name] = callback(user_id)
        cls._internal_enqueue(
            event, user_id, cls.EVENT_CALLBACKS[event].keys(), extra_data,
            transactional=transactional)
    @classmethod
    def _internal_enqueue(cls, event, user_id, callbacks, extra_data,
                          transactional):
        """Adds a task carrying the event, callback list, and extra data."""
        for callback in callbacks:
            if callback not in cls.EVENT_CALLBACKS[event]:
                raise ValueError(
                    'Callback "%s" not in callbacks registered for event %s'
                    % (callback, event))
        task = taskqueue.Task(params={
            'event': event,
            'user_id': user_id,
            'callbacks': ' '.join(callbacks),
            'timestamp': datetime.datetime.utcnow().strftime(
                transforms.ISO_8601_DATETIME_FORMAT),
            'extra_data': transforms.dumps(extra_data),
        })
        task.add(cls.QUEUE_NAME, transactional=transactional)
    def post(self):
        """Task-queue entry point: dispatches the event to its callbacks.

        Responds 200 for malformed tasks (so they are not retried) and
        raises for no-progress runs (so the queue retries with backoff).
        """
        # Only the task queue service sets this header; reject others.
        if 'X-AppEngine-QueueName' not in self.request.headers:
            self.response.set_status(500)
            return
        user_id = self.request.get('user_id')
        if not user_id:
            logging.critical('Student lifecycle queue had item with no user')
            self.response.set_status(200)
            return
        event = self.request.get('event')
        if not event:
            logging.critical('Student lifecycle queue had item with no event')
            self.response.set_status(200)
            return
        timestamp_str = self.request.get('timestamp')
        try:
            timestamp = datetime.datetime.strptime(
                timestamp_str, transforms.ISO_8601_DATETIME_FORMAT)
        except ValueError:
            logging.critical('Student lifecycle queue: malformed timestamp %s',
                             timestamp_str)
            self.response.set_status(200)
            return
        extra_data = self.request.get('extra_data')
        if extra_data:
            extra_data = transforms.loads(extra_data)
        else:
            extra_data = {}
        callbacks = self.request.get('callbacks')
        if not callbacks:
            logging.warning('Odd: Student lifecycle with no callback items')
            self.response.set_status(200)
            return
        callbacks = callbacks.split(' ')
        # Configure path in threadlocal cache in sites; callbacks may
        # be dynamically determining their current context by calling
        # sites.get_app_context_for_current_request(), which relies on
        # sites.PATH_INFO_THREAD_LOCAL.path.
        current_namespace = namespace_manager.get_namespace()
        logging.info(
            '-- Dequeue in namespace "%s" handling event %s for user %s --',
            current_namespace, event, user_id)
        from controllers import sites
        app_context = sites.get_course_index().get_app_context_for_namespace(
            current_namespace)
        path = app_context.get_slug()
        if hasattr(sites.PATH_INFO_THREAD_LOCAL, 'path'):
            has_path_info = True
            save_path_info = sites.PATH_INFO_THREAD_LOCAL.path
        else:
            has_path_info = False
        sites.PATH_INFO_THREAD_LOCAL.path = path
        try:
            # Run each callback; collect the ones that fail for re-enqueue.
            remaining_callbacks = []
            for callback in callbacks:
                if callback not in self.EVENT_CALLBACKS[event]:
                    logging.error(
                        'Student lifecycle event enqueued with callback named '
                        '"%s", but no such callback is currently registered.',
                        callback)
                    continue
                try:
                    logging.info('-- Student lifecycle callback %s starting --',
                                 callback)
                    callback_extra_data = extra_data.get(callback)
                    # Two- or three-argument form, depending on whether an
                    # enqueue callback supplied extra data for this module.
                    if callback_extra_data is None:
                        self.EVENT_CALLBACKS[event][callback](
                            user_id, timestamp)
                    else:
                        self.EVENT_CALLBACKS[event][callback](
                            user_id, timestamp, callback_extra_data)
                    logging.info('-- Student lifecycle callback %s success --',
                                 callback)
                except Exception, ex:  # pylint: disable=broad-except
                    logging.error(
                        '-- Student lifecycle callback %s fails: %s --',
                        callback, str(ex))
                    common_utils.log_exception_origin()
                    remaining_callbacks.append(callback)
        finally:
            # Restore (or remove) the thread-local path we overwrote above.
            if has_path_info:
                sites.PATH_INFO_THREAD_LOCAL.path = save_path_info
            else:
                del sites.PATH_INFO_THREAD_LOCAL.path
        if remaining_callbacks == callbacks:
            # If we have made _no_ progress, emit error and get queue backoff.
            num_tries = 1 + int(
                self.request.headers.get('X-AppEngine-TaskExecutionCount', '0'))
            complaint = (
                'Student lifecycle callback in namespace %s for '
                'event %s enqueued at %s made no progress on any of the '
                'callbacks %s for user %s after %d attempts' % (
                    namespace_manager.get_namespace() or '<blank>',
                    event, timestamp_str, callbacks, user_id, num_tries))
            logging.warning(complaint)
            if num_tries >= QUEUE_RETRIES_BEFORE_SENDING_MAIL.value:
                app_id = app_identity.get_application_id()
                sender = 'queue_admin@%s.appspotmail.com' % app_id
                subject = ('Queue processing: Excessive retries '
                           'on student lifecycle queue')
                body = complaint + ' in application ' + app_id
                mail.send_mail_to_admins(sender, subject, body)
            raise RuntimeError(
                'Queued work incomplete; raising error to force retries.')
        else:
            # If we made some progress, but still have work to do, re-enqueue
            # remaining work.
            if remaining_callbacks:
                logging.warning(
                    'Student lifecycle callback for event %s enqueued at %s '
                    'made some progress, but needs retries for the following '
                    'callbacks: %s', event, timestamp_str, callbacks)
                self._internal_enqueue(
                    event, user_id, remaining_callbacks, extra_data,
                    transactional=False)
            # Claim success if any work done, whether or not any work remains.
            self.response.set_status(200)
class StudentProfileDAO(object):
    """All access and mutation methods for PersonalProfile and Student."""
    # Profiles live in the default (global) namespace, shared across courses.
    TARGET_NAMESPACE = appengine_config.DEFAULT_NAMESPACE_NAME
    # Only for use from modules with fields in Student or PersonalProfile.
    # (As of 2016-02-19, this is only student_groups). Other modules should
    # register with StudentLifecycleObserver.
    STUDENT_CREATION_HOOKS = []
@classmethod
def _memcache_key(cls, key):
"""Makes a memcache key from primary key."""
return 'entity:personal-profile:%s' % key
    @classmethod
    def _get_profile_by_user_id(cls, user_id):
        """Loads profile given a user_id and returns Entity object.

        Reads through memcache, negative-caching misses as NO_OBJECT.
        Temporarily switches to the global namespace and always restores
        the caller's namespace afterwards.
        """
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(cls.TARGET_NAMESPACE)
            profile = MemcacheManager.get(
                cls._memcache_key(user_id), namespace=cls.TARGET_NAMESPACE)
            # NO_OBJECT marks user_ids already known to have no profile.
            if profile == NO_OBJECT:
                return None
            if profile:
                return profile
            profile = PersonalProfile.get_by_key_name(user_id)
            MemcacheManager.set(
                cls._memcache_key(user_id), profile if profile else NO_OBJECT,
                namespace=cls.TARGET_NAMESPACE)
            return profile
        finally:
            namespace_manager.set_namespace(old_namespace)
    @classmethod
    def delete_profile_by_user_id(cls, user_id):
        """Deletes the global profile entity and its memcache entry."""
        with common_utils.Namespace(cls.TARGET_NAMESPACE):
            PersonalProfile.delete_by_key(user_id)
            MemcacheManager.delete(
                cls._memcache_key(user_id), namespace=cls.TARGET_NAMESPACE)
    @classmethod
    def add_new_profile(cls, user_id, email):
        """Adds new profile for a user_id and returns Entity object.

        NOTE(review): does not invalidate the memcache entry for user_id;
        presumably only reached when no (or a NO_OBJECT) entry is cached —
        confirm with callers such as _add_new_student_for_current_user_in_txn.
        """
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(cls.TARGET_NAMESPACE)
            profile = PersonalProfile(key_name=user_id)
            profile.email = email
            profile.put()
            return profile
        finally:
            namespace_manager.set_namespace(old_namespace)
    @classmethod
    def _update_global_profile_attributes(
        cls, profile,
        email=None, legal_name=None, nick_name=None,
        date_of_birth=None, is_enrolled=None, final_grade=None,
        course_info=None):
        """Modifies various attributes of Student's Global Profile.

        Only non-None arguments are applied; the profile is not saved here
        (callers use _put_profile). Note: is_enrolled is accepted for
        signature symmetry with _update_attributes but is not used here.
        """
        if email is not None:
            # Normalize for case-insensitive matching.
            profile.email = email.lower()
        if legal_name is not None:
            profile.legal_name = legal_name
        if nick_name is not None:
            profile.nick_name = nick_name
        if date_of_birth is not None:
            profile.date_of_birth = date_of_birth
        # TODO(nretallack): Remove this block and re-calculate this dynamically
        if final_grade is not None or course_info is not None:
            # Defer to avoid circular import.
            from controllers import sites
            course = sites.get_course_for_current_request()
            course_namespace = course.get_namespace_name()
            # course_info is a JSON dict keyed by course namespace.
            course_info_dict = {}
            if profile.course_info:
                course_info_dict = transforms.loads(profile.course_info)
            info = course_info_dict.get(course_namespace, {})
            if final_grade:
                info['final_grade'] = final_grade
            if course_info:
                info['info'] = course_info
            course_info_dict[course_namespace] = info
            profile.course_info = transforms.dumps(course_info_dict)
    @classmethod
    def _update_course_profile_attributes(
        cls, student, nick_name=None, is_enrolled=None, labels=None):
        """Modifies various attributes of Student's Course Profile.

        Only non-None arguments are applied; the student is not saved here.
        """
        if nick_name is not None:
            student.name = nick_name
        if is_enrolled is not None:
            student.is_enrolled = is_enrolled
        if labels is not None:
            student.labels = labels
    @classmethod
    def _update_attributes(
        cls, profile, student,
        email=None, legal_name=None, nick_name=None,
        date_of_birth=None, is_enrolled=None, final_grade=None,
        course_info=None, labels=None):
        """Modifies various attributes of Student and Profile.

        Either profile or student may be None; the relevant updates are
        applied to whichever is present. Nothing is persisted here.
        """
        if profile:
            cls._update_global_profile_attributes(
                profile, email=email, legal_name=legal_name,
                nick_name=nick_name, date_of_birth=date_of_birth,
                is_enrolled=is_enrolled, final_grade=final_grade,
                course_info=course_info)
        if student:
            cls._update_course_profile_attributes(
                student, nick_name=nick_name, is_enrolled=is_enrolled,
                labels=labels)
@classmethod
def _put_profile(cls, profile):
    """Persists a profile and drops its memcache entry; no-op on None."""
    if profile:
        profile.put()
        # The cached copy is now stale; evict it so the next read
        # refetches from the datastore.
        MemcacheManager.delete(
            cls._memcache_key(profile.user_id),
            namespace=cls.TARGET_NAMESPACE)
@classmethod
def get_profile_by_user_id(cls, user_id):
    """Loads profile given a user_id and returns DTO object, or None."""
    profile = cls._get_profile_by_user_id(user_id)
    return PersonalProfileDTO(personal_profile=profile) if profile else None
@classmethod
def get_profile_by_user(cls, user):
    """Returns the PersonalProfileDTO for a users-API user, or None."""
    return cls.get_profile_by_user_id(user.user_id())
@classmethod
def add_new_student_for_current_user(
        cls, nick_name, additional_fields, handler, labels=None):
    """Registers the signed-in user as a Student and sends a welcome email.

    Args:
        nick_name: display name chosen by the student.
        additional_fields: serialized registration-form data stored on
            the Student record.
        handler: request handler, used to build the welcome notification.
        labels: optional labels string to set on the Student.
    """
    user = users.get_current_user()
    student_by_uid = Student.get_by_user_id(user.user_id())
    is_valid_student = (student_by_uid is None or
                        student_by_uid.user_id == user.user_id())
    assert is_valid_student, (
        'Student\'s email and user id do not match.')
    student = cls._add_new_student_for_current_user(
        user.user_id(), user.email(), nick_name, additional_fields, labels)
    # Registration must succeed even when the notification cannot be sent,
    # so any notification failure is logged and swallowed.
    try:
        cls._send_welcome_notification(handler, student)
    except Exception, e:  # On purpose. pylint: disable=broad-except
        logging.error(
            'Unable to send welcome notification; error was: ' + str(e))
@classmethod
def _add_new_student_for_current_user(
        cls, user_id, email, nick_name, additional_fields, labels=None):
    """Resolves any existing Student key, then runs the add transaction."""
    existing = Student.get_by_user_id(user_id)
    key_name = existing.key().name() if existing else None
    return cls._add_new_student_for_current_user_in_txn(
        key_name, user_id, email, nick_name, additional_fields, labels)
@classmethod
@db.transactional(xg=True)
def _add_new_student_for_current_user_in_txn(
        cls, key_name, user_id, email, nick_name, additional_fields,
        labels=None):
    """Create new or re-enroll old student.

    Runs as a cross-group transaction so the global profile and the
    per-course Student record are created/updated atomically.

    Args:
        key_name: existing Student key name, or None to create a new one.
        user_id: users-API user id.
        email: student email; lower-cased before storing on the Student.
        nick_name: display name to set on profile and student.
        additional_fields: serialized registration-form data.
        labels: optional labels string.

    Returns:
        The saved Student entity.
    """
    # create profile if does not exist
    profile = cls._get_profile_by_user_id(user_id)
    if not profile:
        profile = cls.add_new_profile(user_id, email)
    # create new student or re-enroll existing
    if key_name:
        student = Student.get_by_key_name(key_name)
    else:
        student = Student._add_new(  # pylint: disable=protected-access
            user_id, email)
    # update profile
    cls._update_attributes(
        profile, student, nick_name=nick_name, is_enrolled=True,
        labels=labels)
    # update student
    student.email = email.lower()
    student.additional_fields = additional_fields
    common_utils.run_hooks(cls.STUDENT_CREATION_HOOKS, student, profile)
    # put both
    cls._put_profile(profile)
    student.put()
    # Queue an 'add' lifecycle event for registered observers.
    StudentLifecycleObserver.enqueue(
        StudentLifecycleObserver.EVENT_ADD, user_id)
    return student
@classmethod
def _send_welcome_notification(cls, handler, student):
    """Queues a welcome email to the student, if the course enables it.

    Subject and body come from the course settings when present
    (rendered as Jinja2 templates against a small context dict) and
    otherwise fall back to defaults. Does nothing when notifications are
    disabled or the student has unsubscribed.
    """
    if not cls._can_send_welcome_notifications(handler):
        return
    if services.unsubscribe.has_unsubscribed(student.email):
        return
    course_settings = handler.app_context.get_environ()['course']
    course_title = course_settings['title']
    sender = cls._get_welcome_notifications_sender(handler)
    assert sender, 'Must set welcome_notifications_sender in course.yaml'
    # Values made available to the subject/body templates.
    context = {
        'student_name': student.name,
        'course_title': course_title,
        'course_url': handler.get_base_href(handler),
        'unsubscribe_url': services.unsubscribe.get_unsubscribe_url(
            handler, student.email)
    }
    if course_settings.get('welcome_notifications_subject'):
        subject = jinja2.Template(unicode(
            course_settings['welcome_notifications_subject']
        )).render(context)
    else:
        subject = 'Welcome to ' + course_title
    if course_settings.get('welcome_notifications_body'):
        body = jinja2.Template(unicode(
            course_settings['welcome_notifications_body']
        )).render(context)
    else:
        # No custom body configured: render the stock template shipped in
        # views/notifications. autoescape off since this is plain text.
        jinja_environment = handler.app_context.fs.get_jinja_environ(
            [os.path.join(
                appengine_config.BUNDLE_ROOT, 'views', 'notifications')],
            autoescape=False)
        body = jinja_environment.get_template('welcome.txt').render(context)
    services.notifications.send_async(
        student.email, sender, WELCOME_NOTIFICATION_INTENT,
        body, subject, audit_trail=context,
    )
@classmethod
def _can_send_welcome_notifications(cls, handler):
    """True iff notifications, unsubscribe, and the course flag are on."""
    if not services.notifications.enabled():
        return False
    if not services.unsubscribe.enabled():
        return False
    return cls._get_send_welcome_notifications(handler)
@classmethod
def _get_send_welcome_notifications(cls, handler):
    """Reads course.send_welcome_notifications; defaults to False."""
    course_settings = handler.app_context.get_environ().get('course', {})
    return course_settings.get('send_welcome_notifications', False)
@classmethod
def _get_welcome_notifications_sender(cls, handler):
    """Reads course.welcome_notifications_sender; None when unset."""
    course_settings = handler.app_context.get_environ().get('course', {})
    return course_settings.get('welcome_notifications_sender')
@classmethod
def get_enrolled_student_by_user_for(cls, user, app_context):
    """Returns student for a specific course.

    Temporarily switches to the course's datastore namespace so the
    lookup hits that course's Student table, and restores the caller's
    namespace even if the lookup raises.
    """
    old_namespace = namespace_manager.get_namespace()
    try:
        namespace_manager.set_namespace(app_context.get_namespace_name())
        return Student.get_enrolled_student_by_user(user)
    finally:
        namespace_manager.set_namespace(old_namespace)
@classmethod
def unregister_user(cls, user_id, unused_timestamp):
    """Marks the student unenrolled; logs and returns if already gone."""
    if Student.get_by_user_id(user_id) is None:
        logging.info(
            'Unregister commanded for user %s, but user already gone.',
            user_id)
        return
    cls.update(user_id, None, is_enrolled=False)
@classmethod
def update(
        cls, user_id, email, legal_name=None, nick_name=None,
        date_of_birth=None, is_enrolled=None, final_grade=None,
        course_info=None, labels=None, profile_only=False):
    """Updates the student and/or global profile inside a transaction."""
    existing = Student.get_by_user_id(user_id)
    key_name = existing.key().name() if existing else None
    cls._update_in_txn(
        key_name, user_id, email, legal_name, nick_name, date_of_birth,
        is_enrolled, final_grade, course_info, labels, profile_only)
@classmethod
@db.transactional(xg=True)
def _update_in_txn(
        cls, key_name, user_id, email, legal_name=None, nick_name=None,
        date_of_birth=None, is_enrolled=None, final_grade=None,
        course_info=None, labels=None, profile_only=False):
    """Updates a student and/or their global profile.

    Runs as a cross-group transaction so the Student entity and the
    global profile stay consistent.

    Args:
        key_name: Student key name; required unless profile_only is True.
        user_id: users-API user id identifying the profile and student.
        email: student email address.
        legal_name, nick_name, date_of_birth, is_enrolled, final_grade,
            course_info, labels: optional values; only non-None values
            are applied.
        profile_only: when True, only the global profile is updated.

    Raises:
        Exception: if profile_only is False and the Student is not found.
    """
    student = None
    if not profile_only:
        if key_name:
            student = Student.get_by_key_name(key_name)
        if not student:
            raise Exception('Unable to find student for: %s' % email)
    profile = cls._get_profile_by_user_id(user_id)
    if not profile:
        profile = cls.add_new_profile(user_id, email)
    # Queue a lifecycle event only when the enrollment flag actually
    # flips from its stored value.
    if (student and is_enrolled is not None and
        student.is_enrolled != is_enrolled):
        if is_enrolled:
            event = StudentLifecycleObserver.EVENT_REENROLL
        else:
            event = StudentLifecycleObserver.EVENT_UNENROLL
        StudentLifecycleObserver.enqueue(event, student.user_id)
    cls._update_attributes(
        profile, student, email=email, legal_name=legal_name,
        nick_name=nick_name, date_of_birth=date_of_birth,
        is_enrolled=is_enrolled, final_grade=final_grade,
        course_info=course_info, labels=labels)
    cls._put_profile(profile)
    if not profile_only:
        student.put()
class StudentCache(caching.RequestScopedSingleton):
    """Request-scoped cache of Student entities, keyed by user_id.

    Negative lookups (None) are cached too, so repeated misses within one
    request cost a single datastore round trip.
    """

    def __init__(self):
        self._students_by_key = {}

    @classmethod
    def _key(cls, user_id):
        """Make key specific to user_id and current namespace."""
        return '%s-%s' % (MemcacheManager.get_namespace(), user_id)

    def _get_by_user_id_from_datastore(self, user_id):
        """Load Student by user_id. Fail if user_id is not unique."""
        # In the CB 1.8 and below email was the key_name. This is no longer
        # true. To support legacy Student entities do a double look up here:
        # first by the key_name value and then by the user_id field value.
        by_key_name = Student.get_by_key_name(user_id)
        if by_key_name:
            return by_key_name
        matches = Student.all().filter(
            Student.user_id.name, user_id).fetch(limit=2)
        if len(matches) > 1:
            raise Exception(
                'There is more than one student with user_id "%s"' % user_id)
        return matches[0] if matches else None

    def _get_by_user_id(self, user_id):
        """Get cached Student with user_id or load one from datastore."""
        key = self._key(user_id)
        if key not in self._students_by_key:
            self._students_by_key[key] = (
                self._get_by_user_id_from_datastore(user_id))
        return self._students_by_key[key]

    def _remove(self, user_id):
        """Remove cached value by user_id."""
        self._students_by_key.pop(self._key(user_id), None)

    @classmethod
    def remove(cls, user_id):
        # pylint: disable=protected-access
        return cls.instance()._remove(user_id)

    @classmethod
    def get_by_user_id(cls, user_id):
        # pylint: disable=protected-access
        return cls.instance()._get_by_user_id(user_id)
class _EmailProperty(db.StringProperty):
    """Class that provides dual look up of email property value.

    Reads prefer the explicitly stored value; for legacy entities
    (CB 1.8 and below) whose key_name is the email, the key_name is
    returned instead. See the Student class docstring for the history.
    """

    def __get__(self, model_instance, model_class):
        # Accessed on the class (not an instance): return the descriptor
        # itself, per the standard descriptor protocol.
        if model_instance is None:
            return self

        # Try to get and use the actual stored value.
        value = super(_EmailProperty, self).__get__(model_instance, model_class)
        if value:
            return value

        # In CB 1.8 and before email was used as a key to Student entity. This
        # is no longer true. Here we provide the backwards compatibility for the
        # legacy Student instances. If user_id and key.name match, it means that
        # the user_id is a key and we should not use key as email and email is
        # not set. If key.name and user_id don't match, the key is also an
        # email.
        if model_instance.is_saved():
            user_id = model_instance.user_id
            key_name = model_instance.key().name()
            if key_name != user_id:
                return key_name
        return None
class _ReadOnlyStringProperty(db.StringProperty):
    """String property that refuses modification once a value is stored.

    The first assignment (or re-assignment of the same value) succeeds;
    assigning a different value afterwards raises ValueError.
    """

    def __set__(self, model_instance, value):
        if model_instance:
            new_value = self.validate(value)
            old_value = self.get_value_for_datastore(model_instance)
            if old_value and old_value != new_value:
                raise ValueError(
                    'Unable to change set once read-only property '
                    '%s.' % self.name)
        super(_ReadOnlyStringProperty, self).__set__(model_instance, value)
class Student(BaseEntity):
    """Student data specific to a course instance.

    This entity represents a student in a specific course. It has somewhat
    complex key_name/user_id/email behavior that comes from the legacy
    mistakes. Current and historical behavior of this class is documented in
    detail below.

    Let us start with the historical retrospect. In CB 1.8 and below:
        - key_name
            - is email for new users
        - email
            - is used as key_name
            - is unique
            - is immutable
        - user_id
            - was introduced in CB 1.2.0
            - was an independent field and not a key_name
            - held google.appengine.api.users.get_current_user().user_id()
            - mutations were not explicitly prevented, but no mutations are
              known to have occurred
            - was used as foreign key in other tables

    Anyone who attempts federated identity will find use of email as key_name
    completely unacceptable. So we decided to make user_id a key_name.

    The ideal solution would have been:
        - store user_id as key_name for new and legacy users
        - store email as an independent mutable field

    The ideal solution was rejected upon discussion. It required taking course
    offline and running M/R or ETL job to modify key_name. All foreign key
    relationships that used old key_name value would need to be fixed up to
    use new value. ETL would also need to be aware of different key structure
    before and after CB 1.8. All in all the ideal approach is complex,
    invasive and error prone. It was ultimately rejected.

    The currently implemented solution is:
        - key_name
            - == user_id for new users
            - == email for users created in CB 1.8 and below
        - user_id
            - is used as key_name
            - is immutable independent field
            - holds google.appengine.api.users.get_current_user().user_id()
            - historical convention is to use user_id as foreign key in other
              tables
        - email
            - is an independent field
            - is mutable

    This solution is a bit complex, but provides all of the behaviors of the
    ideal solution. It is 100% backwards compatible and does not require
    offline upgrade step. For example, if student, who registered and
    unregistered in CB 1.8 or below, now re-registers, we will reactivate his
    original student entry thus keeping all prior progress (modulo
    data-removal policy).

    The largest new side effect is:
        - there may be several users with the same email in one course

    If email uniqueness is desired it needs to be added on separately.

    We automatically execute core CB functional tests under both the old and
    the new key_name logic. This is done in LegacyEMailAsKeyNameTest. The
    exact interplay of key_name/user_id/email is tested in StudentKeyNameTest.
    We also manually ran the entire test suite with
    _LEGACY_EMAIL_AS_KEY_NAME_ENABLED set to True and all tests passed, except
    test_rpc_performance in the class
    tests.functional.modules_i18n_dashboard.SampleCourseLocalizationTest. This
    failure is expected as we have extra datastore lookup in get() by email.

    We did not optimize get() by email RPC performance as this call is used
    rarely. To optimize dual datastore lookup in get_by_user_id() we tried
    memcache and request-scope cache for Student. Request-scoped cache
    provided much better results and this is what we implemented.

    We are confident that core CB components, including peer review system,
    use user_id as foreign key and will continue working with no changes. Any
    custom components that used email as foreign key will stop working and
    will require modifications and an upgrade step.

    TODO(psimakov):
        - review how email changes propagate between global and per-course
          namespaces

    Good luck!
    """

    enrolled_on = db.DateTimeProperty(auto_now_add=True, indexed=True)
    # Immutable users-API user id; also the key_name for post-1.8 entities.
    user_id = _ReadOnlyStringProperty(indexed=True)
    # Mutable email; legacy entities fall back to key_name (_EmailProperty).
    email = _EmailProperty(indexed=True)
    name = db.StringProperty(indexed=False)
    additional_fields = db.TextProperty(indexed=False)
    is_enrolled = db.BooleanProperty(indexed=False)

    # UTC timestamp of when the user last rendered a page that's aware they've
    # enrolled in a course.
    last_seen_on = db.DateTimeProperty(indexed=True)

    # Each of the following is a string representation of a JSON dict.
    scores = db.TextProperty(indexed=False)
    labels = db.StringProperty(indexed=False)

    # Group ID of group in which student is a member; can be None.
    group_id = db.IntegerProperty(indexed=True)

    # In CB 1.8 and below an email was used as a key_name. This is no longer
    # true and a user_id is the key_name. We transparently support legacy
    # Student entity instances that still have email as key_name, but we no
    # longer create new entries. If you need to enable this old behavior set
    # the flag below to True.
    _LEGACY_EMAIL_AS_KEY_NAME_ENABLED = False

    # TODO(mgainer): why is user_id is not here?
    _PROPERTY_EXPORT_BLACKLIST = [
        additional_fields,  # Suppress all additional_fields items.
        # Convenience items if not all additional_fields should be suppressed:
        #'additional_fields.xsrf_token',  # Not PII, but also not useful.
        #'additional_fields.form01',  # User's name on registration form.
        email, name]

    def __init__(self, *args, **kwargs):
        super(Student, self).__init__(*args, **kwargs)
        # Lazy cache for the federated_email property below.
        self._federated_email_cached = False
        self._federated_email_value = None

    @classmethod
    def safe_key(cls, db_key, transform_fn):
        """Returns a copy of db_key with its name run through transform_fn."""
        return db.Key.from_path(cls.kind(), transform_fn(db_key.id_or_name()))

    @classmethod
    def _add_new(cls, user_id, email):
        """Builds (does not save) a Student keyed per the legacy flag."""
        if cls._LEGACY_EMAIL_AS_KEY_NAME_ENABLED:
            return Student(key_name=email, email=email, user_id=user_id)
        else:
            return Student(
                key_name=user_id, email=email.lower(), user_id=user_id)

    def for_export(self, transform_fn):
        """Creates an ExportEntity populated from this entity instance."""
        assert not hasattr(self, 'key_by_user_id')
        model = super(Student, self).for_export(transform_fn)
        model.user_id = transform_fn(self.user_id)
        # Add a version of the key that always uses the user_id for the name
        # component. This can be used to establish relationships between
        # objects where the student key used was created via get_key(). In
        # general, this means clients will join exports on this field, not the
        # field made from safe_key().
        model.key_by_user_id = self.get_key(transform_fn=transform_fn)
        return model

    @property
    def federated_email(self):
        """Gets the federated email address of the student.

        This always returns None unless federated authentication is enabled
        and the federated authentication implementation implements an email
        resolver. See common.users.FederatedEmailResolver.
        """
        if not self._federated_email_cached:
            manager = users.UsersServiceManager.get()
            resolver = manager.get_federated_email_resolver_class()
            assert resolver
            self._federated_email_value = (
                resolver.get(self.user_id) if self.user_id else None)
            self._federated_email_cached = True
        return self._federated_email_value

    @property
    def is_transient(self):
        # Always False: this is a real datastore-backed record, in contrast
        # to TransientStudent.
        return False

    @property
    def profile(self):
        # The student's global PersonalProfileDTO, or None if absent.
        return StudentProfileDAO.get_profile_by_user_id(self.user_id)

    def put(self):
        """Evicts this student from the request cache, then does put()."""
        StudentCache.remove(self.user_id)
        return super(Student, self).put()

    def delete(self):
        """Do the normal delete() and also remove the object from cache."""
        StudentCache.remove(self.user_id)
        super(Student, self).delete()

    @classmethod
    def add_new_student_for_current_user(
            cls, nick_name, additional_fields, handler, labels=None):
        """Registers the current user; delegates to StudentProfileDAO."""
        StudentProfileDAO.add_new_student_for_current_user(
            nick_name, additional_fields, handler, labels)

    @classmethod
    def get_first_by_email(cls, email):
        """Get the first student matching requested email.

        Returns:
            A tuple: (Student, unique). The first value is the first student
            object with requested email. The second value is set to True if
            only exactly one student has this email on record; False
            otherwise.
        """
        # In the CB 1.8 and below email was the key_name. This is no longer
        # true. To support legacy Student entities do a double look up here:
        # first by the key_name value and then by the email field value.
        student = cls.get_by_key_name(email)
        if student:
            return (student, True)
        students = cls.all().filter(cls.email.name, email).fetch(limit=2)
        if not students:
            return (None, False)
        return (students[0], len(students) == 1)

    @classmethod
    def get_by_user(cls, user):
        """Returns the Student for a users-API user, or None."""
        return cls.get_by_user_id(user.user_id())

    @classmethod
    def get_enrolled_student_by_user(cls, user):
        """Returns enrolled student or None."""
        student = cls.get_by_user_id(user.user_id())
        if student and student.is_enrolled:
            return student
        return None

    @classmethod
    def is_email_in_use(cls, email):
        """Checks if an email is in use by one of existing student."""
        if cls.all().filter(cls.email.name, email).fetch(limit=1):
            return True
        return False

    @classmethod
    def _get_user_and_student(cls):
        """Loads user and student and asserts both are present."""
        user = users.get_current_user()
        if not user:
            raise Exception('No current user.')
        student = Student.get_by_user(user)
        if not student:
            raise Exception('Student instance corresponding to user_id %s not '
                            'found.' % user.user_id())
        return user, student

    @classmethod
    def rename_current(cls, new_name):
        """Gives student a new name."""
        _, student = cls._get_user_and_student()
        StudentProfileDAO.update(
            student.user_id, student.email, nick_name=new_name)

    @classmethod
    def set_enrollment_status_for_current(cls, is_enrolled):
        """Changes student enrollment status."""
        _, student = cls._get_user_and_student()
        StudentProfileDAO.update(
            student.user_id, student.email, is_enrolled=is_enrolled)

    @classmethod
    def set_labels_for_current(cls, labels):
        """Set labels for tracks on the student."""
        _, student = cls._get_user_and_student()
        StudentProfileDAO.update(
            student.user_id, student.email, labels=labels)

    def get_key(self, transform_fn=None):
        """Gets a version of the key that uses user_id for the key name."""
        if not self.user_id:
            raise Exception('Student instance has no user_id set.')
        user_id = transform_fn(self.user_id) if transform_fn else self.user_id
        return db.Key.from_path(Student.kind(), user_id)

    @classmethod
    def get_by_user_id(cls, user_id):
        """Get object from datastore with the help of cache."""
        return StudentCache.get_by_user_id(user_id)

    @classmethod
    def delete_by_user_id(cls, user_id):
        """Deletes the Student with the given user_id, if present."""
        student = cls.get_by_user_id(user_id)
        if student:
            student.delete()

    def has_same_key_as(self, key):
        """Checks if the key of the student and the given key are equal."""
        return key == self.get_key()

    def get_labels_of_type(self, label_type):
        """Returns the set of this student's label ids of the given type."""
        if not self.labels:
            return set()
        label_ids = LabelDAO.get_set_of_ids_of_type(label_type)
        return set([int(label) for label in
                    common_utils.text_to_list(self.labels)
                    if int(label) in label_ids])

    def update_last_seen_on(self, now=None, value=None):
        """Updates last_seen_on.

        Args:
            now: datetime.datetime.utcnow or None. Injectable for tests only.
            value: datetime.datetime.utcnow or None. Injectable for tests
                only.
        """
        now = now if now is not None else datetime.datetime.utcnow()
        value = value if value is not None else now
        if self._should_update_last_seen_on(value):
            self.last_seen_on = value
            self.put()
            StudentCache.remove(self.user_id)

    def _should_update_last_seen_on(self, value):
        # Throttle writes: only update when value is more than
        # STUDENT_LAST_SEEN_ON_UPDATE_SEC past the stored timestamp (or
        # when no timestamp is stored yet).
        if self.last_seen_on is None:
            return True
        return (
            (value - self.last_seen_on).total_seconds() >
            STUDENT_LAST_SEEN_ON_UPDATE_SEC)
class TransientStudent(object):
    """Stand-in for a visitor who has not logged in or registered."""

    @property
    def is_transient(self):
        """Always True: this object never maps to a datastore entity."""
        return True

    @property
    def is_enrolled(self):
        """A transient visitor is never enrolled."""
        return False

    @property
    def scores(self):
        """No assessment scores exist for a transient visitor."""
        return {}
class EventEntity(BaseEntity):
    """A generic recorded event.

    'source' names the place in code where the event was recorded,
    'user_id' identifies the actor who triggered it, and 'data' holds a
    JSON-encoded payload whose schema is defined elsewhere and depends on
    the event type.

    When extending this class, be sure to register your new class with
    models.data_removal.Registry so that instances can be cleaned up on
    user un-registration.
    """

    recorded_on = db.DateTimeProperty(auto_now_add=True, indexed=True)
    source = db.StringProperty(indexed=False)
    user_id = db.StringProperty(indexed=False)

    # JSON-encoded event payload.
    data = db.TextProperty(indexed=False)

    # Modules may add functions to this list which will receive notification
    # whenever an event is recorded. Each entry is called as
    # listener(source, user, data_dict) from record().
    EVENT_LISTENERS = []

    @classmethod
    @db.non_transactional
    def _run_record_hooks(cls, source, user, data_dict):
        """Invokes every registered listener; errors are logged, not raised."""
        for listener in cls.EVENT_LISTENERS:
            try:
                listener(source, user, data_dict)
            except Exception:  # On purpose. pylint: disable=broad-except
                logging.exception(
                    'Event record hook failed: %s, %s, %s',
                    source, user.user_id(), data_dict)

    @classmethod
    def record(cls, source, user, data, user_id=None):
        """Records new event into a datastore."""
        # Decode once so listeners can inspect (and possibly adjust) the
        # payload, then persist whatever they left behind.
        payload = transforms.loads(data)
        cls._run_record_hooks(source, user, payload)
        event = cls()
        event.source = source
        event.user_id = user_id or user.user_id()
        event.data = transforms.dumps(payload)
        event.put()

    def for_export(self, transform_fn):
        """Returns an export copy with the user id transformed."""
        model = super(EventEntity, self).for_export(transform_fn)
        model.user_id = transform_fn(self.user_id)
        return model

    def get_user_ids(self):
        # Thus far, events pertain only to one user; no need to look in data.
        return [self.user_id]
class StudentAnswersEntity(BaseEntity):
    """Student answers to the assessments."""

    updated_on = db.DateTimeProperty(indexed=True)

    # Each of the following is a string representation of a JSON dict.
    data = db.TextProperty(indexed=False)

    @classmethod
    def safe_key(cls, db_key, transform_fn):
        # Returns a copy of db_key with its name run through transform_fn
        # (e.g. to anonymize keys for export).
        return db.Key.from_path(cls.kind(), transform_fn(db_key.id_or_name()))
class StudentPropertyEntity(BaseEntity):
    """A property of a student, keyed by the string STUDENT_ID-PROPERTY_NAME.

    When extending this class, be sure to register your new class with
    models.data_removal.Registry so that instances can be cleaned up on user
    un-registration. See an example of how to do that at the bottom of this
    file.
    """

    updated_on = db.DateTimeProperty(indexed=True)
    name = db.StringProperty()

    # String representation of a JSON dict.
    value = db.TextProperty()

    @classmethod
    def _memcache_key(cls, key):
        """Makes a memcache key from primary key."""
        return 'entity:student_property:%s' % key

    @classmethod
    def create_key(cls, student_id, property_name):
        """Builds the composite STUDENT_ID-PROPERTY_NAME key string."""
        return '%s-%s' % (student_id, property_name)

    @classmethod
    def create(cls, student, property_name):
        """Builds (does not save) a property entity for the student."""
        return cls(
            key_name=cls.create_key(student.user_id, property_name),
            name=property_name)

    @classmethod
    def safe_key(cls, db_key, transform_fn):
        """Returns a copy of db_key with only the user_id part transformed."""
        user_id, name = db_key.name().split('-', 1)
        return db.Key.from_path(
            cls.kind(), '%s-%s' % (transform_fn(user_id), name))

    def put(self):
        """Do the normal put() and also add the object to memcache."""
        result = super(StudentPropertyEntity, self).put()
        MemcacheManager.set(self._memcache_key(self.key().name()), self)
        return result

    def delete(self):
        """Do the normal delete() and also remove the object from memcache."""
        super(StudentPropertyEntity, self).delete()
        MemcacheManager.delete(self._memcache_key(self.key().name()))

    @classmethod
    def get(cls, student, property_name):
        """Loads student property; caches positive and negative results."""
        key = cls.create_key(student.user_id, property_name)
        cache_key = cls._memcache_key(key)
        cached = MemcacheManager.get(cache_key)
        # NO_OBJECT is a cached negative result: the row is known absent.
        if NO_OBJECT == cached:
            return None
        if cached:
            return cached
        entity = cls.get_by_key_name(key)
        MemcacheManager.set(cache_key, entity if entity else NO_OBJECT)
        return entity
class BaseJsonDao(object):
    """Base DAO class for entities storing their data in a single JSON blob.

    Subclasses configure DTO, ENTITY and ENTITY_KEY_TYPE class attributes.
    All reads are served through memcache where possible; NO_OBJECT is used
    as a cached negative result throughout.
    """

    class EntityKeyTypeId(object):
        # Key strategy for entities keyed by auto-assigned numeric id.

        @classmethod
        def get_entity_by_key(cls, entity_class, key):
            return entity_class.get_by_id(int(key))

        @classmethod
        def new_entity(cls, entity_class, unused_key):
            return entity_class()  # ID auto-generated when entity is put().

    class EntityKeyTypeName(object):
        # Key strategy for entities keyed by caller-supplied key_name.

        @classmethod
        def get_entity_by_key(cls, entity_class, key):
            return entity_class.get_by_key_name(key)

        @classmethod
        def new_entity(cls, entity_class, key_name):
            return entity_class(key_name=key_name)

    @classmethod
    def _memcache_key(cls, obj_id):
        """Makes a memcache key from datastore id."""
        # Keeping case-sensitivity in kind() because Foo(object) != foo(object).
        return '(entity:%s:%s)' % (cls.ENTITY.kind(), obj_id)

    @classmethod
    def _memcache_all_key(cls):
        """Makes a memcache key for caching get_all()."""
        # Keeping case-sensitivity in kind() because Foo(object) != foo(object).
        return '(entity-get-all:%s)' % cls.ENTITY.kind()

    @classmethod
    def get_all_mapped(cls):
        """Returns a dict of {id: DTO} for all rows, via memcache if warm."""
        # try to get from memcache
        entities = MemcacheManager.get(cls._memcache_all_key())
        if entities is not None and entities != NO_OBJECT:
            cls._maybe_apply_post_load_hooks(entities.itervalues())
            return entities

        # get from datastore
        result = {dto.id: dto for dto in cls.get_all_iter()}

        # put into memcache
        result_to_cache = NO_OBJECT
        if result:
            result_to_cache = result
        MemcacheManager.set(cls._memcache_all_key(), result_to_cache)

        cls._maybe_apply_post_load_hooks(result.itervalues())
        return result

    @classmethod
    def get_all(cls):
        """Returns a list of all DTOs of this type."""
        return cls.get_all_mapped().values()

    @classmethod
    def get_all_iter(cls):
        """Return a generator that will produce all DTOs of a given type.

        Yields:
            A DTO for each row in the Entity type's table.
        """
        prev_cursor = None
        any_records = True
        # Page through the table with query cursors until a pass yields
        # no rows.
        while any_records:
            any_records = False
            query = cls.ENTITY.all().with_cursor(prev_cursor)
            for entity in query.run():
                any_records = True
                yield cls.DTO(entity.key().id_or_name(),
                              transforms.loads(entity.data))
            prev_cursor = query.cursor()

    @classmethod
    def _maybe_apply_post_load_hooks(cls, dto_list):
        """Run any post-load processing hooks.

        Modules may insert post-load processing hooks (e.g. for i18n
        translation) into the list POST_LOAD_HOOKS defined on the DAO class.
        If the class has this list and any hook functions are present, they
        are passed the list of DTO's for in-place processing.

        Args:
            dto_list: list of DTO objects
        """
        if hasattr(cls, 'POST_LOAD_HOOKS'):
            for hook in cls.POST_LOAD_HOOKS:
                hook(dto_list)

    @classmethod
    def _maybe_apply_post_save_hooks(cls, dto_and_id_list):
        """Run any post-save processing hooks.

        Modules may insert post-save processing hooks (e.g. for i18n
        translation) into the list POST_SAVE_HOOKS defined on the DAO class.
        If the class has this list and any hook functions are present, they
        are passed the list of DTO's for in-place processing.

        Args:
            dto_and_id_list: list of pairs of (id, DTO) objects
        """
        # Copies are passed to the hooks, so the caller's DTOs are not
        # mutated by hook processing.
        dto_list = [
            cls.DTO(dto_id, orig_dto.dict)
            for dto_id, orig_dto in dto_and_id_list]
        if hasattr(cls, 'POST_SAVE_HOOKS'):
            common_utils.run_hooks(cls.POST_SAVE_HOOKS, dto_list)

    @classmethod
    def _load_entity(cls, obj_id):
        """Fetches the raw entity by id via memcache; None when absent."""
        if not obj_id:
            return None
        memcache_key = cls._memcache_key(obj_id)
        entity = MemcacheManager.get(memcache_key)
        # NO_OBJECT is a cached negative result: the row is known absent.
        if NO_OBJECT == entity:
            return None
        if not entity:
            entity = cls.ENTITY_KEY_TYPE.get_entity_by_key(cls.ENTITY, obj_id)
            if entity:
                MemcacheManager.set(memcache_key, entity)
            else:
                MemcacheManager.set(memcache_key, NO_OBJECT)
        return entity

    @classmethod
    def load(cls, obj_id):
        """Loads a single DTO by id; returns None when not found."""
        entity = cls._load_entity(obj_id)
        if entity:
            dto = cls.DTO(obj_id, transforms.loads(entity.data))
            cls._maybe_apply_post_load_hooks([dto])
            return dto
        else:
            return None

    @classmethod
    @appengine_config.timeandlog('Models.bulk_load')
    def bulk_load(cls, obj_id_list):
        """Loads many DTOs; result list aligns with obj_id_list.

        Missing ids produce None at the corresponding position. Negative
        datastore lookups are cached as NO_OBJECT.
        """
        # fetch from memcache
        memcache_keys = [cls._memcache_key(obj_id) for obj_id in obj_id_list]
        memcache_entities = MemcacheManager.get_multi(memcache_keys)

        # fetch missing from datastore
        both_keys = zip(obj_id_list, memcache_keys)
        datastore_keys = [
            obj_id for obj_id, memcache_key in both_keys
            if memcache_key not in memcache_entities]
        if datastore_keys:
            datastore_entities = dict(zip(
                datastore_keys, get([
                    db.Key.from_path(cls.ENTITY.kind(), obj_id)
                    for obj_id in datastore_keys])))
        else:
            datastore_entities = {}

        # weave the results together
        ret = []
        memcache_update = {}
        dtos_for_post_hooks = []
        for obj_id, memcache_key in both_keys:
            entity = datastore_entities.get(obj_id)
            if entity is not None:
                # Fresh from datastore: return it and schedule caching.
                dto = cls.DTO(obj_id, transforms.loads(entity.data))
                ret.append(dto)
                dtos_for_post_hooks.append(dto)
                memcache_update[memcache_key] = entity
            elif memcache_key not in memcache_entities:
                # Datastore miss: cache the negative result.
                ret.append(None)
                memcache_update[memcache_key] = NO_OBJECT
            else:
                entity = memcache_entities[memcache_key]
                if NO_OBJECT == entity:
                    ret.append(None)
                else:
                    ret.append(cls.DTO(obj_id, transforms.loads(entity.data)))

        # run hooks
        cls._maybe_apply_post_load_hooks(dtos_for_post_hooks)

        # put into memcache
        if datastore_entities:
            MemcacheManager.set_multi(memcache_update)

        return ret

    @classmethod
    def _create_if_necessary(cls, dto):
        """Returns the entity behind dto, creating one if absent.

        Always re-serializes dto.dict into entity.data.
        """
        entity = cls._load_entity(dto.id)
        if not entity:
            entity = cls.ENTITY_KEY_TYPE.new_entity(cls.ENTITY, dto.id)
        entity.data = transforms.dumps(dto.dict)
        return entity

    @classmethod
    def before_put(cls, dto, entity):
        """Hook for subclasses to adjust the entity just before put()."""
        pass

    @classmethod
    def save(cls, dto):
        """Persists one DTO; returns its datastore id or key name."""
        entity = cls._create_if_necessary(dto)
        cls.before_put(dto, entity)
        entity.put()
        # The get_all() cache is now stale; the single-entity cache can be
        # refreshed in place.
        MemcacheManager.delete(cls._memcache_all_key())
        id_or_name = entity.key().id_or_name()
        MemcacheManager.set(cls._memcache_key(id_or_name), entity)
        cls._maybe_apply_post_save_hooks([(id_or_name, dto)])
        return id_or_name

    @classmethod
    def save_all(cls, dtos):
        """Performs a block persist of a list of DTO's."""
        entities = []
        for dto in dtos:
            entity = cls._create_if_necessary(dto)
            entities.append(entity)
            cls.before_put(dto, entity)

        keys = put(entities)
        MemcacheManager.delete(cls._memcache_all_key())
        for key, entity in zip(keys, entities):
            MemcacheManager.set(cls._memcache_key(key.id_or_name()), entity)
        id_or_name_list = [key.id_or_name() for key in keys]
        cls._maybe_apply_post_save_hooks(zip(id_or_name_list, dtos))
        return id_or_name_list

    @classmethod
    def delete(cls, dto):
        """Deletes the entity behind dto and invalidates both caches."""
        entity = cls._load_entity(dto.id)
        entity.delete()
        MemcacheManager.delete(cls._memcache_all_key())
        MemcacheManager.delete(cls._memcache_key(entity.key().id_or_name()))

    @classmethod
    def clone(cls, dto):
        """Returns an unsaved deep copy of dto with its id cleared."""
        return cls.DTO(None, copy.deepcopy(dto.dict))
class LastModifiedJsonDao(BaseJsonDao):
    """Base DAO that updates the last_modified field of entities on every save.

    DTOs managed by this DAO must have a settable field last_modified defined.
    """

    @classmethod
    def save(cls, dto):
        """Stamps dto.last_modified, then delegates to the base save()."""
        dto.last_modified = time.time()
        return super(LastModifiedJsonDao, cls).save(dto)

    @classmethod
    def save_all(cls, dtos):
        """Stamps every DTO, then delegates to the base save_all()."""
        for item in dtos:
            item.last_modified = time.time()
        return super(LastModifiedJsonDao, cls).save_all(dtos)
class QuestionEntity(BaseEntity):
    """An object representing a top-level question."""

    # JSON-encoded question payload; QuestionDTO provides the accessors.
    data = db.TextProperty(indexed=False)
class QuestionDTO(object):
    """Wraps a question's JSON payload dict in attribute-style accessors."""

    # Discriminator values stored under the 'type' key.
    MULTIPLE_CHOICE = 0
    SHORT_ANSWER = 1

    def __init__(self, the_id, the_dict):
        self.id = the_id
        self.dict = the_dict

    @property
    def type(self):
        """Question flavor (MULTIPLE_CHOICE/SHORT_ANSWER); None if unset."""
        return self.dict.get('type')

    @type.setter
    def type(self, value):
        self.dict['type'] = value

    @property
    def description(self):
        """Human-readable description; empty string when absent."""
        return self.dict.get('description') or ''

    @description.setter
    def description(self, value):
        self.dict['description'] = value

    @property
    def last_modified(self):
        """Last-save stamp; empty string when the DTO was never saved."""
        return self.dict.get('last_modified') or ''

    @last_modified.setter
    def last_modified(self, value):
        self.dict['last_modified'] = value
class QuestionDAO(LastModifiedJsonDao):
    """DAO for QuestionEntity, keyed by datastore id."""

    VERSION = '1.5'
    DTO = QuestionDTO
    ENTITY = QuestionEntity
    ENTITY_KEY_TYPE = BaseJsonDao.EntityKeyTypeId
    # Enable other modules to add post-load transformations
    POST_LOAD_HOOKS = []
    # Enable other modules to add post-save transformations
    POST_SAVE_HOOKS = []

    @classmethod
    def used_by(cls, question_id):
        """Returns the question groups using a question.

        Args:
            question_id: int. Identifier of the question we're testing.

        Returns:
            List of question groups. The list of all question groups that use
            the given question, with one entry per use (a group containing
            the question twice appears twice).
        """
        # O(num_question_groups), but deserialization of 1 large group takes
        # ~1ms so practically speaking latency is OK for the admin console.
        matches = []
        for group in QuestionGroupDAO.get_all():
            # Add the group the same amount of times as it contains the question
            matches.extend([group] * (
                [long(x) for x in group.question_ids].count(long(question_id))
            ))
        return matches

    @classmethod
    def create_question(cls, question_dict, question_type):
        """Wraps question_dict in a typed DTO and saves it; returns its id."""
        question = cls.DTO(None, question_dict)
        question.type = question_type
        return cls.save(question)

    @classmethod
    def get_questions_descriptions(cls):
        """Returns the set of descriptions over all stored questions."""
        return set([q.description for q in cls.get_all()])

    @classmethod
    def validate_unique_description(cls, description):
        """Raises CollisionError if description is already in use."""
        if description in cls.get_questions_descriptions():
            raise CollisionError(
                'Non-unique question description: %s' % description)
        return None
class QuestionImporter(object):
    """Helper class for converting ver. 1.2 questions to ver. 1.3 ones."""

    @classmethod
    def _gen_description(cls, unit, lesson_title, question_number):
        """Builds the canonical description for an imported question."""
        return (
            'Unit "%s", lesson "%s" (question #%s)' % (
                unit.title, lesson_title, question_number))

    @classmethod
    def import_freetext(cls, question, description, task):
        """Converts a ver. 1.2 freetext question to a short-answer dict.

        Raises:
            CollisionError: if the description is already in use.
            ValidationError: if a required field is missing from question.
        """
        QuestionDAO.validate_unique_description(description)
        try:
            response = question.get('correctAnswerRegex')
            # Regex /.*/ is added as a guard for questions with no answer.
            response = response.value if response else '/.*/'
            return {
                'version': QuestionDAO.VERSION,
                'description': description,
                'question': task,
                'hint': question['showAnswerOutput'],
                'graders': [{
                    'score': 1.0,
                    'matcher': 'regex',
                    'response': response,
                    'feedback': question.get('correctAnswerOutput', '')
                }],
                'defaultFeedback': question.get('incorrectAnswerOutput', '')}
        except KeyError as e:
            raise ValidationError('Invalid question: %s, %s' % (description, e))

    @classmethod
    def import_question(
            cls, question, unit, lesson_title, question_number, task):
        """Dispatches import of one ver. 1.2 question by its questionType.

        Returns:
            Tuple of (id of the saved question or question group, freshly
            generated instance id).

        Raises:
            ValueError: if the question type is not recognized.
        """
        question_type = question['questionType']
        task = ''.join(task)
        description = cls._gen_description(unit, lesson_title, question_number)
        if question_type == 'multiple choice':
            question_dict = cls.import_multiple_choice(
                question, description, task)
            qid = QuestionDAO.create_question(
                question_dict, QuestionDAO.DTO.MULTIPLE_CHOICE)
        elif question_type == 'freetext':
            question_dict = cls.import_freetext(question, description, task)
            qid = QuestionDAO.create_question(
                question_dict, QuestionDTO.SHORT_ANSWER)
        elif question_type == 'multiple choice group':
            question_group_dict = cls.import_multiple_choice_group(
                question, description, unit, lesson_title, question_number,
                task)
            qid = QuestionGroupDAO.create_question_group(question_group_dict)
        else:
            raise ValueError('Unknown question type: %s' % question_type)
        return (qid, common_utils.generate_instance_id())

    @classmethod
    def import_multiple_choice(cls, question, description, task):
        """Converts a ver. 1.2 multiple-choice question to a 1.3 dict.

        A choice is scored 1.0 when its second element's value is truthy,
        0.0 otherwise.
        """
        QuestionDAO.validate_unique_description(description)
        task = ''.join(task) if task else ''
        qu_dict = {
            'version': QuestionDAO.VERSION,
            'description': description,
            'question': task,
            'multiple_selections': False,
            'choices': [
                {
                    'text': choice[0],
                    'score': 1.0 if choice[1].value else 0.0,
                    'feedback': choice[2]
                } for choice in question['choices']]}
        # Add optional fields
        if 'defaultFeedback' in question:
            qu_dict['defaultFeedback'] = question['defaultFeedback']
        if 'permute_choices' in question:
            qu_dict['permute_choices'] = question['permute_choices']
        if 'show_answer_when_incorrect' in question:
            qu_dict['show_answer_when_incorrect'] = (
                question['show_answer_when_incorrect'])
        if 'all_or_nothing_grading' in question:
            qu_dict['all_or_nothing_grading'] = (
                question['all_or_nothing_grading'])
        return qu_dict

    @classmethod
    def import_multiple_choice_group(
            cls, group, description, unit, lesson_title, question_number, task):
        """Import a 'multiple choice group' as a question group.

        Each member question is saved individually (weight 1.0) and the
        returned dict references the new question ids in its 'items'.
        """
        QuestionGroupDAO.validate_unique_description(description)
        question_group_dict = {
            'version': QuestionDAO.VERSION,
            'description': description,
            'introduction': task}
        question_list = []
        for index, question in enumerate(group['questionsList']):
            description = (
                'Unit "%s", lesson "%s" (question #%s, part #%s)'
                % (unit.title, lesson_title, question_number, index + 1))
            question_dict = cls.import_multiple_choice_group_question(
                question, description)
            question = QuestionDTO(None, question_dict)
            question.type = QuestionDTO.MULTIPLE_CHOICE
            question_list.append(question)
        qid_list = QuestionDAO.save_all(question_list)
        question_group_dict['items'] = [{
            'question': quid,
            'weight': 1.0} for quid in qid_list]
        return question_group_dict

    @classmethod
    def import_multiple_choice_group_question(cls, orig_question, description):
        """Import the questions from a group as individual questions.

        A non-int correctIndex means several answers are correct; partial
        credit is split evenly and wrong picks score -1.0.
        """
        QuestionDAO.validate_unique_description(description)
        # TODO(jorr): Handle allCorrectOutput and someCorrectOutput
        correct_index = orig_question['correctIndex']
        multiple_selections = not isinstance(correct_index, int)
        if multiple_selections:
            partial = 1.0 / len(correct_index)
            choices = [{
                'text': text,
                'score': partial if i in correct_index else -1.0
            } for i, text in enumerate(orig_question['choices'])]
        else:
            choices = [{
                'text': text,
                'score': 1.0 if i == correct_index else 0.0
            } for i, text in enumerate(orig_question['choices'])]
        return {
            'version': QuestionDAO.VERSION,
            'description': description,
            'question': orig_question.get('questionHTML') or '',
            'multiple_selections': multiple_selections,
            'choices': choices}

    @classmethod
    def build_short_answer_question_dict(cls, question_html, matcher, response):
        """Assembles a minimal single-grader short-answer question dict."""
        return {
            'version': QuestionDAO.VERSION,
            'question': question_html or '',
            'graders': [{
                'score': 1.0,
                'matcher': matcher,
                'response': response,
            }]
        }

    @classmethod
    def build_multiple_choice_question_dict(cls, question):
        """Assemble the dict for a multiple choice question.

        A bare string choice is a wrong answer (0.0); a wrapped choice
        object marks the correct answer (1.0).
        """
        question_dict = {
            'version': QuestionDAO.VERSION,
            'question': question.get('questionHTML') or '',
            'multiple_selections': False
        }
        choices = []
        for choice in question.get('choices'):
            if isinstance(choice, basestring):
                text = choice
                score = 0.0
            else:
                text = choice.value
                score = 1.0
            choices.append({
                'text': text,
                'score': score
            })
        question_dict['choices'] = choices
        return question_dict

    @classmethod
    def import_assessment_question(cls, question):
        """Converts one assessment question dict into an unsaved QuestionDTO.

        Raises:
            ValueError: if the dict matches no known question shape.
        """
        if 'questionHTML' in question:
            question['questionHTML'] = question['questionHTML'].decode(
                'string-escape')
        # Convert a single question into a QuestionDTO.
        if 'choices' in question:
            q_dict = cls.build_multiple_choice_question_dict(
                question)
            question_type = QuestionDTO.MULTIPLE_CHOICE
        elif 'correctAnswerNumeric' in question:
            q_dict = cls.build_short_answer_question_dict(
                question.get('questionHTML'),
                'numeric',
                question.get('correctAnswerNumeric'))
            question_type = QuestionDTO.SHORT_ANSWER
        elif 'correctAnswerString' in question:
            q_dict = cls.build_short_answer_question_dict(
                question.get('questionHTML'),
                'case_insensitive',
                question.get('correctAnswerString'))
            question_type = QuestionDTO.SHORT_ANSWER
        elif 'correctAnswerRegex' in question:
            q_dict = cls.build_short_answer_question_dict(
                question.get('questionHTML'),
                'regex',
                question.get('correctAnswerRegex').value)
            question_type = QuestionDTO.SHORT_ANSWER
        else:
            raise ValueError('Unknown question type')
        question_dto = QuestionDTO(None, q_dict)
        question_dto.type = question_type
        return question_dto

    @classmethod
    def build_question_dtos(cls, assessment_dict, template, unit, errors):
        """Convert the assessment into a list of QuestionDTO's.

        On any failure, appends a message to errors and returns None.
        """
        descriptions = QuestionDAO.get_questions_descriptions()
        question_dtos = []
        try:
            for i, q in enumerate(assessment_dict['questionsList']):
                description = template % (unit.title, (i + 1))
                if description in descriptions:
                    raise CollisionError(
                        'Non-unique question description: %s' % description)
                question_dto = cls.import_assessment_question(q)
                question_dto.dict['description'] = description
                question_dtos.append(question_dto)
        except CollisionError:
            errors.append(
                'This assessment has already been imported. Remove '
                'duplicate questions from the question bank in '
                'order to re-import: %s.' % description)
            return None
        except Exception as ex:  # pylint: disable=broad-except
            errors.append('Unable to convert: %s' % ex)
            return None
        return question_dtos
class QuestionGroupEntity(BaseEntity):
    """An object representing a question group in the datastore."""

    # JSON-serialized group payload; field layout is defined by
    # QuestionGroupDTO.
    data = db.TextProperty(indexed=False)
class QuestionGroupDTO(object):
    """Data transfer object for question groups.

    Wraps a JSON-serializable dict whose 'items' list holds
    {'question': <id>, 'weight': <float>} records.
    """

    def __init__(self, the_id, the_dict):
        self.id = the_id
        self.dict = the_dict

    @property
    def description(self):
        """Group description; '' when missing or falsy."""
        stored = self.dict.get('description')
        return stored if stored else ''

    @property
    def introduction(self):
        """Introductory text shown before the group; '' when missing."""
        stored = self.dict.get('introduction')
        return stored if stored else ''

    @property
    def question_ids(self):
        """Ids of member questions, in item order."""
        ids = []
        for entry in self.dict.get('items', []):
            ids.append(entry['question'])
        return ids

    @property
    def items(self):
        """Deep copy of the item records; safe for callers to mutate."""
        return copy.deepcopy(self.dict.get('items', []))

    def add_question(self, question_id, weight):
        """Appends a question reference with the given weight."""
        self.dict['items'].append({'question': question_id, 'weight': weight})

    @property
    def last_modified(self):
        """Timestamp stamped by LastModifiedJsonDao; '' when never saved."""
        stored = self.dict.get('last_modified')
        return stored if stored else ''

    @last_modified.setter
    def last_modified(self, value):
        self.dict['last_modified'] = value
class QuestionGroupDAO(LastModifiedJsonDao):
    """DAO for QuestionGroupEntity, keyed by datastore id."""

    DTO = QuestionGroupDTO
    ENTITY = QuestionGroupEntity
    ENTITY_KEY_TYPE = BaseJsonDao.EntityKeyTypeId
    # Enable other modules to add post-load transformations
    POST_LOAD_HOOKS = []
    # Enable other modules to add post-save transformations
    POST_SAVE_HOOKS = []

    @classmethod
    def get_question_groups_descriptions(cls):
        """Returns the set of descriptions over all stored groups."""
        return set([g.description for g in cls.get_all()])

    @classmethod
    def create_question_group(cls, question_group_dict):
        """Wraps and saves a question-group dict; returns its new id."""
        question_group = QuestionGroupDTO(None, question_group_dict)
        return cls.save(question_group)

    @classmethod
    def validate_unique_description(cls, description):
        """Raises CollisionError if description is already in use."""
        if description in cls.get_question_groups_descriptions():
            raise CollisionError(
                'Non-unique question group description: %s' % description)
class LabelEntity(BaseEntity):
    """A class representing labels that can be applied to Student, Unit, etc."""

    # JSON-serialized label payload; field layout is defined by LabelDTO.
    data = db.TextProperty(indexed=False)
    # Key under which the full list of labels is cached.
    MEMCACHE_KEY = 'labels'
    _PROPERTY_EXPORT_BLACKLIST = []  # No PII in labels.

    def put(self):
        """Save the content to the datastore.

        To support caching the list of all labels, we must invalidate
        the cache on any change to any label.

        Returns:
            Value of entity as modified by put() (i.e., key setting)
        """
        result = super(LabelEntity, self).put()
        MemcacheManager.delete(self.MEMCACHE_KEY)
        return result

    def delete(self):
        """Remove a label from the datastore.

        To support caching the list of all labels, we must invalidate
        the cache on any change to any label.
        """
        super(LabelEntity, self).delete()
        MemcacheManager.delete(self.MEMCACHE_KEY)
class LabelDTO(object):
    """DTO for labels: type constants plus per-type admin UI metadata."""

    LABEL_TYPE_GENERAL = 0
    LABEL_TYPE_COURSE_TRACK = 1
    LABEL_TYPE_LOCALE = 2
    # ... etc.
    # If you are extending CourseBuilder, please consider picking
    # a number at 1,000 or over to avoid any potential conflicts
    # with types added by the CourseBuilder team in future releases.

    # Provide consistent naming and labeling for admin UI elements.
    LabelType = collections.namedtuple(
        'LabelType', ['type', 'name', 'title', 'menu_order'])
    USER_EDITABLE_LABEL_TYPES = [
        LabelType(LABEL_TYPE_GENERAL, 'general', 'General', 0),
        LabelType(LABEL_TYPE_COURSE_TRACK, 'course_track', 'Course Track', 1),
    ]
    SYSTEM_EDITABLE_LABEL_TYPES = [
        LabelType(LABEL_TYPE_LOCALE, 'locale', 'Language', 2),
    ]
    LABEL_TYPES = USER_EDITABLE_LABEL_TYPES + SYSTEM_EDITABLE_LABEL_TYPES

    def __init__(self, the_id, the_dict):
        self.id = the_id
        self.dict = the_dict  # UI layer takes care of sanity-checks.

    @property
    def title(self):
        return self.dict.get('title', '')

    @property
    def description(self):
        return self.dict.get('description', '')

    @property
    def type(self):
        # Untyped legacy labels are treated as general-purpose.
        return self.dict.get('type', self.LABEL_TYPE_GENERAL)
class LabelManager(caching.RequestScopedSingleton):
    """Request-scoped cache that loads all labels from the datastore once."""

    def __init__(self):
        # Lazily-built map of label id -> LabelDTO; None until first use.
        self._key_to_label = None

    def _preload(self):
        # Populate the id -> LabelDTO map in a single datastore pass.
        self._key_to_label = {}
        for row in LabelDAO.get_all_iter():
            self._key_to_label[row.id] = row

    def _get_all(self):
        if self._key_to_label is None:
            self._preload()
        return self._key_to_label.values()

    @classmethod
    def get_all(cls):
        """Returns all LabelDTOs, loaded at most once per request."""
        # pylint: disable=protected-access
        return cls.instance()._get_all()
class LabelDAO(BaseJsonDao):
    """DAO for labels, plus helpers that filter items by label."""

    DTO = LabelDTO
    ENTITY = LabelEntity
    ENTITY_KEY_TYPE = BaseJsonDao.EntityKeyTypeId

    @classmethod
    def get_all(cls):
        """Returns all labels, sorted by label-type menu order, then title."""
        items = LabelManager.get_all()
        order = {lt.type: lt.menu_order for lt in LabelDTO.LABEL_TYPES}
        return sorted(items, key=lambda l: (order[l.type], l.title))

    @classmethod
    def get_all_of_type(cls, label_type):
        """Returns all labels of the given LabelDTO.LABEL_TYPE_* type."""
        return [label for label in cls.get_all()
                if label.type == label_type]

    @classmethod
    def get_set_of_ids_of_type(cls, label_type):
        """Returns the set of ids of all labels of the given type."""
        return set([label.id for label in cls.get_all_of_type(label_type)])

    @classmethod
    def _apply_locale_labels_to_locale(cls, locale, items):
        """Filter out items not matching locale labels and current locale.

        Items carrying no locale label are always kept; items with at least
        one locale label are kept only if one of those labels' titles equals
        the given locale.

        Args:
            locale: str or None. Current locale; falsy disables filtering.
            items: list of objects with a 'labels' attribute holding a
                comma-separated string of label ids. Pruned in place.

        Returns:
            The same items list, possibly with elements removed.
        """
        if locale:
            id_to_label = {}
            for label in LabelDAO.get_all_of_type(
                    LabelDTO.LABEL_TYPE_LOCALE):
                id_to_label[int(label.id)] = label
            for item in list(items):
                # Locale-label ids attached to this particular item.
                # Test membership against the dict itself; .keys() builds
                # a list on Python 2, making each test O(n).
                item_matches = set([int(label_id) for label_id in
                                    common_utils.text_to_list(item.labels)
                                    if int(label_id) in id_to_label])
                found = False
                for item_match in item_matches:
                    if id_to_label[item_match].title == locale:
                        found = True
                        break  # One matching locale label is enough.
                if item_matches and not found:
                    items.remove(item)
        return items

    @classmethod
    def apply_course_track_labels_to_student_labels(
            cls, course, student, items):
        """Prunes items by course-track labels, then by locale for student."""
        # Batch cache reads for the duration of the filtering pass.
        MemcacheManager.begin_readonly()
        try:
            items = cls._apply_labels_to_student_labels(
                LabelDTO.LABEL_TYPE_COURSE_TRACK, student, items)
            if course.get_course_setting('can_student_change_locale'):
                return cls._apply_locale_labels_to_locale(
                    course.app_context.get_current_locale(), items)
            else:
                return cls._apply_labels_to_student_labels(
                    LabelDTO.LABEL_TYPE_LOCALE, student, items)
        finally:
            MemcacheManager.end_readonly()

    @classmethod
    def _apply_labels_to_student_labels(cls, label_type, student, items):
        """Filter out items whose labels don't match those on the student.

        If the student has no labels, all items are taken.
        Similarly, if an item has no labels, it is included.

        Args:
            label_type: a label type to consider.
            student: the logged-in Student matching the user for this request.
            items: a list of item instances, each having 'labels' attribute.

        Returns:
            A list of item instances whose labels match those on the student.
        """
        label_ids = LabelDAO.get_set_of_ids_of_type(label_type)
        if student and not student.is_transient:
            student_matches = student.get_labels_of_type(label_type)
            for item in list(items):
                item_matches = set([int(label_id) for label_id in
                                    common_utils.text_to_list(item.labels)
                                    if int(label_id) in label_ids])
                if (student_matches and item_matches and
                    student_matches.isdisjoint(item_matches)):
                    items.remove(item)
        return items
class StudentPreferencesEntity(BaseEntity):
    """A class representing an individual's preferences for a course.

    Note that here, we are using "Student" in the broadest sense possible:
    some human associated with a course.  This basically means that we want to
    support preferences that are relevant to a student's view of a course, as
    well as a course administrator's preferences.  These will be saved in the
    same object but will be edited in different editors, appropriate to the
    scope of the particular field in the DTO.  For example, show_hooks and
    show_jinja_context are edited in the Dashboard, in
    modules/dashboard/admin_preferences_editor.py
    while locale is set by an Ajax widget in base.html.

    Note that this type is indexed by "name" -- the key is the same as
    that of the user.get_current_user().user_id(), which is a string.

    This type is course-specific, so it must be accessed within a namespaced
    context.
    """

    # JSON-serialized preferences; field layout is defined by
    # StudentPreferencesDTO.
    data = db.TextProperty(indexed=False)

    @classmethod
    def safe_key(cls, db_key, transform_fn):
        """Returns a copy of db_key with its name run through transform_fn."""
        return db.Key.from_path(cls.kind(), transform_fn(db_key.name()))
class StudentPreferencesDTO(object):
    """Per-user course preferences backed by a JSON-serializable dict."""

    def __init__(self, the_id, the_dict):
        self.id = the_id
        self.dict = the_dict

    @property
    def show_hooks(self):
        """Show controls to permit editing of HTML inclusions (hook points).

        Course pages insert HTML content at various locations (hook
        points).  True shows an admin those locations with editing
        controls; False renders the content as a student would see it.
        Defaults to True when never set.
        """
        if 'show_hooks' in self.dict:
            return self.dict['show_hooks']
        return True

    @show_hooks.setter
    def show_hooks(self, value):
        self.dict['show_hooks'] = value

    @property
    def show_jinja_context(self):
        """Whether to dump the Jinja context at the bottom of pages."""
        if 'show_jinja_context' in self.dict:
            return self.dict['show_jinja_context']
        return False

    @show_jinja_context.setter
    def show_jinja_context(self, value):
        self.dict['show_jinja_context'] = value

    @property
    def locale(self):
        """Preferred locale code, or None when the user never chose one."""
        return self.dict.get('locale')

    @locale.setter
    def locale(self, value):
        self.dict['locale'] = value

    @property
    def last_location(self):
        """Most recently visited course page, so a (presumably bookmarked)
        visit to the base URL can be redirected there."""
        return self.dict.get('last_location')

    @last_location.setter
    def last_location(self, value):
        self.dict['last_location'] = value
class StudentPreferencesDAO(BaseJsonDao):
    """DAO for StudentPreferencesEntity, keyed by user id string."""

    DTO = StudentPreferencesDTO
    ENTITY = StudentPreferencesEntity
    ENTITY_KEY_TYPE = BaseJsonDao.EntityKeyTypeName
    CURRENT_VERSION = '1.0'

    @classmethod
    def load_or_default(cls):
        """Loads the current user's preferences, or an unsaved default DTO.

        Returns:
            StudentPreferencesDTO, or None when no user is logged in.
        """
        user = users.get_current_user()
        if not user:
            return None
        user_id = user.user_id()
        prefs = cls.load(user_id)
        if not prefs:
            # Explicit show_hooks=False here overrides the DTO getter's
            # default of True for never-populated dicts.
            prefs = StudentPreferencesDTO(
                user_id, {
                    'version': cls.CURRENT_VERSION,
                    'show_hooks': False,
                    'show_jinja_context': False
                })
        return prefs
class RoleEntity(BaseEntity):
    """Datastore entity holding one role's JSON payload (see RoleDTO)."""

    data = db.TextProperty(indexed=False)
class RoleDTO(object):
    """Data transfer object for roles."""

    def __init__(self, the_id, the_dict):
        self.id = the_id
        self.dict = the_dict

    @property
    def name(self):
        """Display name of the role; '' when unset."""
        if 'name' in self.dict:
            return self.dict['name']
        return ''

    @property
    def description(self):
        """Free-form description of the role; '' when unset."""
        if 'description' in self.dict:
            return self.dict['description']
        return ''

    @property
    def users(self):
        """Users assigned to this role; [] when unset."""
        if 'users' in self.dict:
            return self.dict['users']
        return []

    @property
    def permissions(self):
        """Permission data granted by this role; {} when unset."""
        if 'permissions' in self.dict:
            return self.dict['permissions']
        return {}
class RoleDAO(BaseJsonDao):
    """DAO for RoleEntity, keyed by datastore id."""

    DTO = RoleDTO
    ENTITY = RoleEntity
    ENTITY_KEY_TYPE = BaseJsonDao.EntityKeyTypeId
def get_global_handlers():
    """Returns (URL, handler) pairs to register at application scope."""
    return [
        (StudentLifecycleObserver.URL, StudentLifecycleObserver),
    ]
def register_for_data_removal():
    """Registers this module's per-user data removers with data_removal."""
    # Profile data lives outside course namespaces; sitewide remover.
    data_removal.Registry.register_sitewide_indexed_by_user_id_remover(
        StudentProfileDAO.delete_profile_by_user_id)
    # Entities keyed (directly or by key prefix) on the user id.
    removers = [
        Student.delete_by_user_id,
        StudentAnswersEntity.delete_by_key,
        StudentPropertyEntity.delete_by_user_id_prefix,
        StudentPreferencesEntity.delete_by_key,
    ]
    for remover in removers:
        data_removal.Registry.register_indexed_by_user_id_remover(remover)
    # EventEntity rows are not indexed by user id; register the class for
    # unindexed removal.
    data_removal.Registry.register_unindexed_entity_class(EventEntity)
| 36.946724 | 80 | 0.639822 |
"""Core data model classes."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import collections
import copy
import datetime
import logging
import os
import sys
import time
import webapp2
import jinja2
import config
import counters
from counters import PerfCounter
from entities import BaseEntity
from entities import delete
from entities import get
from entities import put
import data_removal
import messages
import services
import transforms
import appengine_config
from common import caching
from common import utils as common_utils
from common import users
from google.appengine.api import app_identity
from google.appengine.api import mail
from google.appengine.api import memcache
from google.appengine.api import namespace_manager
from google.appengine.api import taskqueue
from google.appengine.ext import db
# Sentinel used to cache a negative lookup ("entity known to be absent"),
# distinguishable from "not cached at all" (None).
NO_OBJECT = {}

# Default memcache entry lifetime, in seconds.
DEFAULT_CACHE_TTL_SECS = 60 * 5

# Maximum size of a single memcache value: 1MB less overhead for the key
# (up to 250 bytes) and implementation bookkeeping (96 bytes).  NOTE: the
# assignment target had been lost in this file; the name is restored here --
# it is referenced by MemcacheManager.set().
MEMCACHE_MAX = (1000 * 1000 - 96 - 250)

# Maximum total payload of one multi-set batch (32MB).
MEMCACHE_MULTI_MAX = 32 * 1000 * 1000

# Minimum interval between updates of a student's last-seen timestamp.
STUDENT_LAST_SEEN_ON_UPDATE_SEC = 24 * 60 * 60
# Site setting gating all memcache use; defaults to on in production only.
CAN_USE_MEMCACHE = config.ConfigProperty(
    'gcb_can_use_memcache', bool, messages.SITE_SETTINGS_MEMCACHE,
    default_value=appengine_config.PRODUCTION_MODE, label='Memcache')

# Perf counters tracking shared-memcache and request-local cache traffic.
CACHE_PUT = PerfCounter(
    'gcb-models-cache-put',
    'A number of times an object was put into memcache.')
CACHE_PUT_TOO_BIG = PerfCounter(
    'gcb-models-cache-put-too-big',
    'Number of times an object was too big to put in memcache.')
CACHE_HIT = PerfCounter(
    'gcb-models-cache-hit',
    'A number of times an object was found in memcache.')
CACHE_MISS = PerfCounter(
    'gcb-models-cache-miss',
    'A number of times an object was not found in memcache.')
CACHE_DELETE = PerfCounter(
    'gcb-models-cache-delete',
    'A number of times an object was deleted from memcache.')
CACHE_PUT_LOCAL = PerfCounter(
    'gcb-models-cache-put-local',
    'A number of times an object was put into local memcache.')
CACHE_HIT_LOCAL = PerfCounter(
    'gcb-models-cache-hit-local',
    'A number of times an object was found in local memcache.')
CACHE_MISS_LOCAL = PerfCounter(
    'gcb-models-cache-miss-local',
    'A number of times an object was not found in local memcache.')

# Intent string identifying the welcome notification.
WELCOME_NOTIFICATION_INTENT = 'welcome'
class MemcacheManager(object):
    """Class that consolidates all memcache operations.

    In addition to the shared App Engine memcache, this class maintains a
    request-local dict cache that is active only between begin_readonly()
    and end_readonly().  Readonly mode is re-entrant (counted) and pinned
    to one app_context; violating either invariant raises AssertionError
    after clearing the local cache.
    """

    # Request-local {namespace: {key: value}} cache; only set in readonly
    # mode.
    _LOCAL_CACHE = None
    _IS_READONLY = False
    _READONLY_REENTRY_COUNT = 0
    # app_context captured when readonly mode began; readonly mode must not
    # span a change of app_context.
    _READONLY_APP_CONTEXT = None

    @classmethod
    def _is_same_app_context_if_set(cls):
        """True when no app_context is pinned, or the current one matches."""
        if cls._READONLY_APP_CONTEXT is None:
            return True
        from controllers import sites
        app_context = sites.get_course_for_current_request()
        return cls._READONLY_APP_CONTEXT == app_context

    @classmethod
    def _assert_true_clear_cache_and_raise_if_not(cls, value_to_assert, msg):
        # On invariant violation, drop all readonly state before raising so
        # we never keep serving from a suspect local cache.
        if not value_to_assert:
            cls.clear_readonly_cache()
            raise AssertionError(msg)

    @classmethod
    def _fs_begin_readonly(cls):
        """Pins the current app_context and puts its filesystem in readonly."""
        from controllers import sites
        cls._READONLY_APP_CONTEXT = sites.get_course_for_current_request()
        if cls._READONLY_APP_CONTEXT:
            cls._READONLY_APP_CONTEXT.fs.begin_readonly()

    @classmethod
    def _fs_end_readonly(cls):
        """Ends filesystem readonly mode and unpins the app_context."""
        if cls._READONLY_APP_CONTEXT:
            cls._READONLY_APP_CONTEXT.fs.end_readonly()
            cls._READONLY_APP_CONTEXT = None

    @classmethod
    def begin_readonly(cls):
        """Enters (possibly nested) readonly mode with a local cache."""
        cls._assert_true_clear_cache_and_raise_if_not(
            cls._READONLY_REENTRY_COUNT >= 0, 'Re-entry counter is < 0.')
        cls._assert_true_clear_cache_and_raise_if_not(
            cls._is_same_app_context_if_set(), 'Unable to switch app_context.')
        if cls._READONLY_REENTRY_COUNT == 0:
            # Only the outermost entry sets up the local cache and fs state.
            appengine_config.log_appstats_event(
                'MemcacheManager.begin_readonly')
            cls._IS_READONLY = True
            cls._LOCAL_CACHE = {}
            cls._fs_begin_readonly()
        cls._READONLY_REENTRY_COUNT += 1

    @classmethod
    def end_readonly(cls):
        """Exits one level of readonly mode; tears down at the outermost."""
        cls._assert_true_clear_cache_and_raise_if_not(
            cls._READONLY_REENTRY_COUNT > 0, 'Re-entry counter <= 0.')
        cls._assert_true_clear_cache_and_raise_if_not(
            cls._is_same_app_context_if_set(), 'Unable to switch app_context.')
        cls._READONLY_REENTRY_COUNT -= 1
        if cls._READONLY_REENTRY_COUNT == 0:
            cls._fs_end_readonly()
            cls._IS_READONLY = False
            cls._LOCAL_CACHE = None
            cls._READONLY_APP_CONTEXT = None
            appengine_config.log_appstats_event('MemcacheManager.end_readonly')

    @classmethod
    def clear_readonly_cache(cls):
        """Unconditionally resets all readonly state (used on violations)."""
        cls._LOCAL_CACHE = None
        cls._IS_READONLY = False
        cls._READONLY_REENTRY_COUNT = 0
        if cls._READONLY_APP_CONTEXT and (
                cls._READONLY_APP_CONTEXT.fs.is_in_readonly):
            cls._READONLY_APP_CONTEXT.fs.end_readonly()
        cls._READONLY_APP_CONTEXT = None

    @classmethod
    def _local_cache_get(cls, key, namespace):
        """Returns (is_cached, value) from the local cache; only in readonly."""
        if cls._IS_READONLY:
            assert cls._is_same_app_context_if_set()
            _dict = cls._LOCAL_CACHE.get(namespace)
            if not _dict:
                _dict = {}
                cls._LOCAL_CACHE[namespace] = _dict
            if key in _dict:
                CACHE_HIT_LOCAL.inc()
                value = _dict[key]
                return True, value
            else:
                CACHE_MISS_LOCAL.inc()
        return False, None

    @classmethod
    def _local_cache_put(cls, key, namespace, value):
        """Stores value in the local cache; no-op outside readonly mode."""
        if cls._IS_READONLY:
            assert cls._is_same_app_context_if_set()
            _dict = cls._LOCAL_CACHE.get(namespace)
            if not _dict:
                _dict = {}
                cls._LOCAL_CACHE[namespace] = _dict
            _dict[key] = value
            CACHE_PUT_LOCAL.inc()

    @classmethod
    def _local_cache_get_multi(cls, keys, namespace):
        """All-or-nothing local batch get; (False, []) on any miss."""
        if cls._IS_READONLY:
            assert cls._is_same_app_context_if_set()
            values = []
            for key in keys:
                is_cached, value = cls._local_cache_get(key, namespace)
                if not is_cached:
                    return False, []
                else:
                    values.append(value)
            return True, values
        return False, []

    @classmethod
    def _local_cache_put_multi(cls, values, namespace):
        """Stores a dict of values in the local cache; no-op when not readonly."""
        if cls._IS_READONLY:
            assert cls._is_same_app_context_if_set()
            for key, value in values.items():
                cls._local_cache_put(key, namespace, value)

    @classmethod
    def get_namespace(cls):
        """Look up namespace from namespace_manager or use default."""
        namespace = namespace_manager.get_namespace()
        if namespace:
            return namespace
        return appengine_config.DEFAULT_NAMESPACE_NAME

    @classmethod
    def _get_namespace(cls, namespace):
        """Returns the explicit namespace if given, else the ambient one."""
        if namespace is not None:
            return namespace
        return cls.get_namespace()

    @classmethod
    def get(cls, key, namespace=None):
        """Gets an item from memcache if memcache is enabled."""
        if not CAN_USE_MEMCACHE.value:
            return None
        _namespace = cls._get_namespace(namespace)
        is_cached, value = cls._local_cache_get(key, _namespace)
        if is_cached:
            # Deep-copy so caller mutations cannot corrupt the cache.
            return copy.deepcopy(value)
        value = memcache.get(key, namespace=_namespace)
        # NOTE: falsy values ('{}' for example) are legitimate cached
        # real objects. Count a cache miss only in a case when
        # an object is None.
        if value is not None:
            CACHE_HIT.inc()
        else:
            CACHE_MISS.inc(context=key)
        cls._local_cache_put(key, _namespace, value)
        return copy.deepcopy(value)

    @classmethod
    def get_multi(cls, keys, namespace=None):
        """Gets a set of items from memcache if memcache is enabled."""
        if not CAN_USE_MEMCACHE.value:
            return {}
        _namespace = cls._get_namespace(namespace)
        is_cached, values = cls._local_cache_get_multi(keys, _namespace)
        if is_cached:
            return values
        values = memcache.get_multi(keys, namespace=_namespace)
        for key, value in values.items():
            if value is not None:
                CACHE_HIT.inc()
            else:
                # NOTE(review): the throwaway Exception() is presumably only
                # for log formatting/searchability -- confirm before removing.
                logging.info('Cache miss, key: %s. %s', key, Exception())
                CACHE_MISS.inc(context=key)
        cls._local_cache_put_multi(values, _namespace)
        return values

    @classmethod
    def set(cls, key, value, ttl=DEFAULT_CACHE_TTL_SECS, namespace=None,
            propagate_exceptions=False):
        """Sets an item in memcache if memcache is enabled."""
        # Ensure subsequent mods to value do not affect the cached copy.
        value = copy.deepcopy(value)
        try:
            if CAN_USE_MEMCACHE.value:
                size = sys.getsizeof(value)
                if size > MEMCACHE_MAX:
                    # Too large for a single memcache entry; skip caching.
                    CACHE_PUT_TOO_BIG.inc()
                else:
                    CACHE_PUT.inc()
                    _namespace = cls._get_namespace(namespace)
                    memcache.set(key, value, ttl, namespace=_namespace)
                    cls._local_cache_put(key, _namespace, value)
        except:  # pylint: disable=bare-except
            # Caching is best-effort: swallow and log unless the caller
            # explicitly asked for exceptions.
            if propagate_exceptions:
                raise
            else:
                logging.exception(
                    'Failed to set: %s, %s', key, cls._get_namespace(namespace))
        return None

    @classmethod
    def set_multi(cls, mapping, ttl=DEFAULT_CACHE_TTL_SECS, namespace=None):
        """Sets a dict of items in memcache if memcache is enabled."""
        try:
            if CAN_USE_MEMCACHE.value:
                if not mapping:
                    return
                # Approximate batch payload size from keys and values.
                size = sum([
                    sys.getsizeof(key) + sys.getsizeof(value)
                    for key, value in mapping.items()])
                if size > MEMCACHE_MULTI_MAX:
                    CACHE_PUT_TOO_BIG.inc()
                else:
                    CACHE_PUT.inc()
                    _namespace = cls._get_namespace(namespace)
                    memcache.set_multi(mapping, time=ttl, namespace=_namespace)
                    cls._local_cache_put_multi(mapping, _namespace)
        except:  # pylint: disable=bare-except
            # Best-effort: never let cache failures break the request.
            logging.exception(
                'Failed to set_multi: %s, %s',
                mapping, cls._get_namespace(namespace))
        return None

    @classmethod
    def delete(cls, key, namespace=None):
        """Deletes an item from memcache if memcache is enabled."""
        # Deletes are writes; forbidden while in readonly mode.
        assert not cls._IS_READONLY
        if CAN_USE_MEMCACHE.value:
            CACHE_DELETE.inc()
            memcache.delete(key, namespace=cls._get_namespace(namespace))

    @classmethod
    def delete_multi(cls, key_list, namespace=None):
        """Deletes a list of items from memcache if memcache is enabled."""
        assert not cls._IS_READONLY
        if CAN_USE_MEMCACHE.value:
            CACHE_DELETE.inc(increment=len(key_list))
            memcache.delete_multi(
                key_list, namespace=cls._get_namespace(namespace))

    @classmethod
    def incr(cls, key, delta, namespace=None):
        """Incr an item in memcache if memcache is enabled."""
        if CAN_USE_MEMCACHE.value:
            memcache.incr(
                key, delta,
                namespace=cls._get_namespace(namespace), initial_value=0)
# When enabled, counter increments are also aggregated across instances in
# memcache (see incr_counter_global_value / get_counter_global_value).
CAN_AGGREGATE_COUNTERS = config.ConfigProperty(
    'gcb_can_aggregate_counters', bool,
    messages.SITE_SETTINGS_AGGREGATE_COUNTERS, default_value=False,
    label='Aggregate Counters')
def incr_counter_global_value(name, delta):
    """Bumps the cross-instance aggregate for counter name by delta.

    No-op when counter aggregation is disabled.
    """
    if not CAN_AGGREGATE_COUNTERS.value:
        return
    MemcacheManager.incr(
        'counter:' + name, delta,
        namespace=appengine_config.DEFAULT_NAMESPACE_NAME)
def get_counter_global_value(name):
    """Returns the cross-instance aggregate for counter name.

    Returns None when counter aggregation is disabled.
    """
    if not CAN_AGGREGATE_COUNTERS.value:
        return None
    return MemcacheManager.get(
        'counter:' + name,
        namespace=appengine_config.DEFAULT_NAMESPACE_NAME)
# Install the memcache-backed aggregate implementations into the counters
# module, which declares these hooks.
counters.get_counter_global_value = get_counter_global_value
counters.incr_counter_global_value = incr_counter_global_value

# Deprecated site setting; kept registered (deprecated=True) rather than
# removed.
DEPRECATED_CAN_SHARE_STUDENT_PROFILE = config.ConfigProperty(
    'gcb_can_share_student_profile', bool, '', default_value=False,
    deprecated=True)
class CollisionError(Exception):
    """Exception raised to show that a collision in a namespace has occurred.

    Raised, e.g., by the DAOs' validate_unique_description() helpers when a
    description is already in use.
    """
class ValidationError(Exception):
    """Exception raised to show that a validation failed.

    Raised, e.g., during question import when required fields are missing.
    """
class ContentChunkEntity(BaseEntity):
    """Defines storage for ContentChunk, a blob of opaque content to display."""

    _PROPERTY_EXPORT_BLACKLIST = []  # No PII in ContentChunks.

    # A string that gives the type of the content chunk. At the data layer we
    # make no restrictions on the values that can be used here -- we only
    # require that a type is given. The type here may be independent of any
    # notion of Content-Type in an HTTP header.
    content_type = db.StringProperty(required=True)
    # UTC last modification timestamp (auto_now: refreshed on every put()).
    last_modified = db.DateTimeProperty(auto_now=True, required=True)
    # Whether or not the chunk supports custom tags. If True, the renderer may
    # be extended to parse and render those tags at display time (this is a stub
    # for future functionality that does not exist yet). If False, the contents
    # of the chunk will be rendered verbatim.
    supports_custom_tags = db.BooleanProperty(default=False)
    # Optional identifier for the chunk in the system it was sourced from.
    # Format is type_id:resource_id where type_id is an identifier that maps to
    # an external system and resource_id is the identifier for a resource within
    # that system (e.g. 'drive:1234' or 'web:http://example.com/index.html').
    # Exact values are up to the caller, but if either type_id or resource_id is
    # given, both must be, they must both be truthy, and type_id cannot contain
    # ':'. Max size is 500B, enforced by datastore.
    uid = db.StringProperty(indexed=True)
    # Payload of the chunk. Max size is 1MB, enforced by datastore.
    contents = db.TextProperty()
class ContentChunkDAO(object):
"""Data access object for ContentChunks."""
    @classmethod
    def delete(cls, entity_id):
        """Deletes ContentChunkEntity for datastore id int; returns None."""
        memcache_key = cls._get_memcache_key(entity_id)
        entity = ContentChunkEntity.get_by_id(entity_id)
        if entity:
            delete(entity)
        # Evict unconditionally: a stale (possibly negative) cache entry may
        # exist even when the entity is already gone.
        MemcacheManager.delete(memcache_key)
    @classmethod
    def get(cls, entity_id):
        """Gets ContentChunkEntityDTO or None from given datastore id int."""
        if entity_id is None:
            return
        memcache_key = cls._get_memcache_key(entity_id)
        found = MemcacheManager.get(memcache_key)
        # NO_OBJECT ({}) is the cached-negative sentinel.  MemcacheManager
        # returns a deepcopy, so this relies on empty-dict equality rather
        # than identity.
        if found == NO_OBJECT:
            return None
        elif found:
            return found
        else:
            # Not cached at all: hit the datastore, then cache either the
            # DTO or the negative sentinel.
            result = None
            cache_value = NO_OBJECT
            entity = ContentChunkEntity.get_by_id(entity_id)
            if entity:
                result = cls._make_dto(entity)
                cache_value = result
            MemcacheManager.set(memcache_key, cache_value)
            return result
@classmethod
def get_by_uid(cls, uid):
"""Gets list of DTOs for all entities with given uid string."""
results = ContentChunkEntity.all().filter(
ContentChunkEntity.uid.name, uid
).fetch(1000)
return sorted(
[cls._make_dto(result) for result in results],
key=lambda dto: dto.id)
@classmethod
def get_or_new_by_uid(cls, uid):
result = cls.get_one_by_uid(uid)
if result is not None:
return result
else:
type_id, resource_id = cls._split_uid(uid)
return ContentChunkDTO({
'type_id': type_id,
'resource_id': resource_id,
})
@classmethod
def get_one_by_uid(cls, uid):
matches = cls.get_by_uid(uid)
if matches:
# There is a data race in the DAO -- it's possible to create two
return matches[0]
else:
return None
@classmethod
def make_uid(cls, type_id, resource_id):
"""Makes a uid string (or None) from the given strings (or Nones)."""
if type_id is None and resource_id is None:
return None
assert type_id and resource_id and ':' not in type_id
return '%s:%s' % (type_id, resource_id)
@classmethod
def save(cls, dto):
"""Saves content of DTO and returns the key of the saved entity.
Handles both creating new and updating existing entities. If the id of a
passed DTO is found, the entity will be updated; otherwise, the entity
will be created.
Note that this method does not refetch the saved entity from the
datastore after put since this is impossible in a transaction. This
means the last_modified date we put in the cache skews from the actual
saved value by however long put took. This is expected datastore
behavior; we do not at present have a use case for perfect accuracy in
this value for our getters.
Args:
dto: ContentChunkDTO. DTO to save. Its last_modified field is
ignored.
Returns:
db.Key of saved ContentChunkEntity.
"""
return cls.save_all([dto])[0]
@classmethod
def save_all(cls, dtos):
"""Saves all given DTOs; see save() for semantics.
Args:
dtos: list of ContentChunkDTO. The last_modified field is ignored.
Returns:
List of db.Key of saved ContentChunkEntities, in order of dto input.
"""
entities = []
for dto in dtos:
if dto.id is None:
entity = ContentChunkEntity(content_type=dto.content_type)
else:
entity = ContentChunkEntity.get_by_id(dto.id)
if entity is None:
entity = ContentChunkEntity(content_type=dto.content_type)
entity.content_type = dto.content_type
entity.contents = dto.contents
entity.supports_custom_tags = dto.supports_custom_tags
entity.uid = cls.make_uid(dto.type_id, dto.resource_id)
entities.append(entity)
db.put(entities)
for entity in entities:
MemcacheManager.delete(cls._get_memcache_key(entity.key().id()))
return [entity.key() for entity in entities]
@classmethod
def _get_memcache_key(cls, entity_id):
assert entity_id is not None
return '(%s:%s)' % (ContentChunkEntity.kind(), entity_id)
@classmethod
def _make_dto(cls, entity):
type_id, resource_id = cls._split_uid(entity.uid)
return ContentChunkDTO({
'content_type': entity.content_type,
'contents': entity.contents,
'id': entity.key().id(),
'last_modified': entity.last_modified,
'resource_id': resource_id,
'supports_custom_tags': entity.supports_custom_tags,
'type_id': type_id,
})
@classmethod
def _split_uid(cls, uid):
resource_id = None
type_id = None
if uid is not None:
assert ':' in uid
type_id, resource_id = uid.split(':', 1)
assert type_id and resource_id
return type_id, resource_id
class ContentChunkDTO(object):
    """Data transfer object for ContentChunks.

    Mirrors the fields of ContentChunkEntity plus the type_id/resource_id
    pair decomposed from the entity's uid. Missing dict keys become None.
    """

    def __init__(self, entity_dict):
        """Populates fields from a dict; absent keys default to None."""
        self.content_type = entity_dict.get('content_type')
        self.contents = entity_dict.get('contents')
        self.id = entity_dict.get('id')
        self.last_modified = entity_dict.get('last_modified')
        self.resource_id = entity_dict.get('resource_id')
        self.supports_custom_tags = entity_dict.get('supports_custom_tags')
        self.type_id = entity_dict.get('type_id')

    def __eq__(self, other):
        return (
            isinstance(other, ContentChunkDTO) and
            self.content_type == other.content_type and
            self.contents == other.contents and
            self.id == other.id and
            self.last_modified == other.last_modified and
            self.resource_id == other.resource_id and
            self.supports_custom_tags == other.supports_custom_tags and
            self.type_id == other.type_id)

    def __ne__(self, other):
        # Bug fix: Python 2 does not derive __ne__ from __eq__, so without
        # this, `a != b` fell back to identity comparison and could disagree
        # with __eq__ for equal-valued DTOs.
        return not self.__eq__(other)
class PersonalProfile(BaseEntity):
    """Personal information not specific to any course instance.

    The entity's key_name is the user_id (see the user_id property below).
    Per-course data is kept as a JSON blob in course_info, keyed by course
    namespace (see StudentProfileDAO._update_global_profile_attributes).
    """

    email = db.StringProperty(indexed=False)
    legal_name = db.StringProperty(indexed=False)
    nick_name = db.StringProperty(indexed=False)
    date_of_birth = db.DateProperty(indexed=False)
    # JSON dict: {course_namespace: {'final_grade': ..., 'info': ...}}.
    course_info = db.TextProperty()

    # All PII fields are excluded (or transformed) on export.
    _PROPERTY_EXPORT_BLACKLIST = [email, legal_name, nick_name, date_of_birth]

    @property
    def user_id(self):
        # The key_name IS the user id; no separate property is stored.
        return self.key().name()

    @classmethod
    def safe_key(cls, db_key, transform_fn):
        # Builds an export-safe key by transforming (e.g. hashing) the
        # user_id key_name.
        return db.Key.from_path(cls.kind(), transform_fn(db_key.name()))
class PersonalProfileDTO(object):
    """Plain data holder mirroring PersonalProfile entity fields."""

    # Attributes copied verbatim from the source entity, when one is given.
    _FIELDS = (
        'user_id', 'email', 'legal_name', 'nick_name', 'date_of_birth',
        'course_info')

    def __init__(self, personal_profile=None):
        # Default course_info is an empty JSON object; overwritten below if a
        # profile entity is supplied.
        self.course_info = '{}'
        if personal_profile:
            for field in self._FIELDS:
                setattr(self, field, getattr(personal_profile, field))
# Number of times the student-lifecycle task queue retries a stuck callback
# before escalating by emailing the site admins (see
# StudentLifecycleObserver.post). Admin-configurable, clamped to 1..50.
QUEUE_RETRIES_BEFORE_SENDING_MAIL = config.ConfigProperty(
    'gcb_lifecycle_queue_retries_before_sending_mail', int,
    messages.SITE_SETTINGS_QUEUE_NOTIFICATION, default_value=10,
    label='Queue Notification',
    validator=config.ValidateIntegerRange(1, 50).validate)
class StudentLifecycleObserver(webapp2.RequestHandler):
    """Provides notification on major events to Students for interested modules.

    Notification is done from an App Engine deferred work queue. This is done
    so that observers who _absolutely_ _positively_ _have_ _to_ _be_ _notified_
    are either going to be notified, or a site administrator is going to get
    an ongoing sequence of email notifications that Something Is Wrong, and
    will then address the problem manually.

    Modules can register to be called back for the lifecycle events listed
    below. Callbacks should be registered like this:

    models.StudentLifecycleObserver.EVENT_CALLBACKS[
        models.StudentLifecycleObserver.EVENT_ADD]['my_module'] = my_handler

    If a callback function needs to have extra information passed to it that
    needs to be collected in the context where the lifecycle event is actually
    happening, modules can register a function in the ENQUEUE_CALLBACKS. Add
    one of those in the same way as for event callbacks:

    models.StudentLifecycleObserver.ENQUEUE_CALLBACKS[
        models.StudentLifecycleObserver.EVENT_ADD]['my_module'] = a_function

    Event notification callbacks are called repeatedly until they return
    without raising an exception. Note that due to retries having an
    exponential backoff (to a maximum of two hours), you cannot rely on
    notifications being delivered in any particular order relative to one
    another.

    Event notification callbacks must take two or three parameters:
    - the user_id (a string)
    - the datetime.datetime UTC timestamp when the event originally occurred.
    - If and only if the enqueue callback was registered and returned a
      non-None value, this argument is passed. If this value is mutable, any
      changes made by the event callback will be retained in any future
      re-tries.

    Enqueue callback functions must take exactly one parameter:
    - the user_id of the user to which the event pertains.
    The function may return any data type which is convertible to a JSON
    string using transforms.dumps(). This value is passed to the event
    notification callback.
    """

    QUEUE_NAME = 'user-lifecycle'
    URL = '/_ah/queue/' + QUEUE_NAME

    # Lifecycle event names, used as keys into both callback registries.
    EVENT_ADD = 'add'
    EVENT_UNENROLL = 'unenroll'
    EVENT_UNENROLL_COMMANDED = 'unenroll_commanded'
    EVENT_REENROLL = 'reenroll'

    # {event_name: {module_name: callback}} — see class docstring for the
    # callback signatures.
    EVENT_CALLBACKS = {
        EVENT_ADD: {},
        EVENT_UNENROLL: {},
        EVENT_UNENROLL_COMMANDED: {},
        EVENT_REENROLL: {},
    }
    ENQUEUE_CALLBACKS = {
        EVENT_ADD: {},
        EVENT_UNENROLL: {},
        EVENT_UNENROLL_COMMANDED: {},
        EVENT_REENROLL: {},
    }

    @classmethod
    def enqueue(cls, event, user_id, transactional=True):
        """Enqueues a lifecycle task for all callbacks registered for event.

        Args:
            event: str. One of the EVENT_* constants above.
            user_id: str. Non-blank user id the event pertains to.
            transactional: bool. Whether the task enqueue joins the caller's
                current datastore transaction.

        Raises:
            ValueError: if event is not a known event or user_id is blank.
        """
        if event not in cls.EVENT_CALLBACKS:
            raise ValueError('Event "%s" not in allowed list: %s' % (
                event, ' '.join(cls.EVENT_CALLBACKS)))
        if not user_id:
            raise ValueError('User ID must be non-blank')
        if not cls.EVENT_CALLBACKS[event]:
            # Nobody is listening for this event; skip the queue entirely.
            return
        # Collect per-module extra data now, in the caller's context, so it
        # can be serialized onto the task.
        extra_data = {}
        for name, callback in cls.ENQUEUE_CALLBACKS[event].iteritems():
            extra_data[name] = callback(user_id)
        cls._internal_enqueue(
            event, user_id, cls.EVENT_CALLBACKS[event].keys(), extra_data,
            transactional=transactional)

    @classmethod
    def _internal_enqueue(cls, event, user_id, callbacks, extra_data,
                          transactional):
        """Builds and adds the task; callbacks is the list of module names."""
        # Sanity check: every requested callback must still be registered.
        for callback in callbacks:
            if callback not in cls.EVENT_CALLBACKS[event]:
                raise ValueError(
                    'Callback "%s" not in callbacks registered for event %s'
                    % (callback, event))
        task = taskqueue.Task(params={
            'event': event,
            'user_id': user_id,
            # Space-separated module names; split back apart in post().
            'callbacks': ' '.join(callbacks),
            'timestamp': datetime.datetime.utcnow().strftime(
                transforms.ISO_8601_DATETIME_FORMAT),
            'extra_data': transforms.dumps(extra_data),
        })
        task.add(cls.QUEUE_NAME, transactional=transactional)

    def post(self):
        """Task-queue entry point: dispatches one event to its callbacks.

        Responds 200 (drop task) for malformed payloads, raises RuntimeError
        (force App Engine retry) when no callback made progress, and
        re-enqueues a fresh task for the subset of callbacks that failed when
        some — but not all — succeeded.
        """
        # Only App Engine's task queue infrastructure can set this header, so
        # its absence means an external caller; refuse with a 500.
        if 'X-AppEngine-QueueName' not in self.request.headers:
            self.response.set_status(500)
            return
        user_id = self.request.get('user_id')
        if not user_id:
            # Malformed task; returning 200 drops it rather than retrying.
            logging.critical('Student lifecycle queue had item with no user')
            self.response.set_status(200)
            return
        event = self.request.get('event')
        if not event:
            logging.critical('Student lifecycle queue had item with no event')
            self.response.set_status(200)
            return
        timestamp_str = self.request.get('timestamp')
        try:
            timestamp = datetime.datetime.strptime(
                timestamp_str, transforms.ISO_8601_DATETIME_FORMAT)
        except ValueError:
            logging.critical('Student lifecycle queue: malformed timestamp %s',
                             timestamp_str)
            self.response.set_status(200)
            return
        extra_data = self.request.get('extra_data')
        if extra_data:
            extra_data = transforms.loads(extra_data)
        else:
            extra_data = {}
        callbacks = self.request.get('callbacks')
        if not callbacks:
            logging.warning('Odd: Student lifecycle with no callback items')
            self.response.set_status(200)
            return
        callbacks = callbacks.split(' ')

        current_namespace = namespace_manager.get_namespace()
        logging.info(
            '-- Dequeue in namespace "%s" handling event %s for user %s --',
            current_namespace, event, user_id)

        # Imported here (not at module level) to avoid a circular import.
        from controllers import sites
        app_context = sites.get_course_index().get_app_context_for_namespace(
            current_namespace)
        path = app_context.get_slug()
        # Temporarily install the course path into the thread-local so
        # callbacks see the same request context a normal handler would;
        # restored (or removed) in the finally block below.
        if hasattr(sites.PATH_INFO_THREAD_LOCAL, 'path'):
            has_path_info = True
            save_path_info = sites.PATH_INFO_THREAD_LOCAL.path
        else:
            has_path_info = False
        sites.PATH_INFO_THREAD_LOCAL.path = path
        try:
            remaining_callbacks = []
            for callback in callbacks:
                if callback not in self.EVENT_CALLBACKS[event]:
                    # Module was unregistered since enqueue time; log and
                    # deliberately do NOT retry this callback.
                    logging.error(
                        'Student lifecycle event enqueued with callback named '
                        '"%s", but no such callback is currently registered.',
                        callback)
                    continue
                try:
                    logging.info('-- Student lifecycle callback %s starting --',
                                 callback)
                    # Third argument is passed only when the module's enqueue
                    # callback supplied data (see class docstring).
                    callback_extra_data = extra_data.get(callback)
                    if callback_extra_data is None:
                        self.EVENT_CALLBACKS[event][callback](
                            user_id, timestamp)
                    else:
                        self.EVENT_CALLBACKS[event][callback](
                            user_id, timestamp, callback_extra_data)
                    logging.info('-- Student lifecycle callback %s success --',
                                 callback)
                except Exception, ex:  # Broad on purpose: any failure retries.
                    logging.error(
                        '-- Student lifecycle callback %s fails: %s --',
                        callback, str(ex))
                    common_utils.log_exception_origin()
                    remaining_callbacks.append(callback)
        finally:
            # Restore the thread-local exactly as we found it.
            if has_path_info:
                sites.PATH_INFO_THREAD_LOCAL.path = save_path_info
            else:
                del sites.PATH_INFO_THREAD_LOCAL.path

        if remaining_callbacks == callbacks:
            # Zero progress: raise so App Engine retries the whole task, and
            # escalate via email once the retry count crosses the threshold.
            num_tries = 1 + int(
                self.request.headers.get('X-AppEngine-TaskExecutionCount',
                                         '0'))
            complaint = (
                'Student lifecycle callback in namespace %s for '
                'event %s enqueued at %s made no progress on any of the '
                'callbacks %s for user %s after %d attempts' % (
                    namespace_manager.get_namespace() or '<blank>',
                    event, timestamp_str, callbacks, user_id, num_tries))
            logging.warning(complaint)
            if num_tries >= QUEUE_RETRIES_BEFORE_SENDING_MAIL.value:
                app_id = app_identity.get_application_id()
                sender = 'queue_admin@%s.appspotmail.com' % app_id
                subject = ('Queue processing: Excessive retries '
                           'on student lifecycle queue')
                body = complaint + ' in application ' + app_id
                mail.send_mail_to_admins(sender, subject, body)
            raise RuntimeError(
                'Queued work incomplete; raising error to force retries.')
        else:
            if remaining_callbacks:
                # Partial progress: acknowledge this task and enqueue a new
                # one carrying only the callbacks that still need to run.
                logging.warning(
                    'Student lifecycle callback for event %s enqueued at %s '
                    'made some progress, but needs retries for the following '
                    'callbacks: %s', event, timestamp_str, callbacks)
                self._internal_enqueue(
                    event, user_id, remaining_callbacks, extra_data,
                    transactional=False)
            self.response.set_status(200)
class StudentProfileDAO(object):
    """All access and mutation methods for PersonalProfile and Student.

    PersonalProfile entities live in the global TARGET_NAMESPACE; Student
    entities live in the per-course namespace. Methods here switch
    namespaces explicitly whenever they touch profiles.
    """

    TARGET_NAMESPACE = appengine_config.DEFAULT_NAMESPACE_NAME

    # Hooks run(student, profile) inside the creation transaction, just
    # before the new Student is put() (see common_utils.run_hooks call).
    STUDENT_CREATION_HOOKS = []

    @classmethod
    def _memcache_key(cls, key):
        """Makes a memcache key from primary key."""
        return 'entity:personal-profile:%s' % key

    @classmethod
    def _get_profile_by_user_id(cls, user_id):
        """Loads profile given a user_id and returns Entity object."""
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(cls.TARGET_NAMESPACE)

            profile = MemcacheManager.get(
                cls._memcache_key(user_id), namespace=cls.TARGET_NAMESPACE)
            if profile == NO_OBJECT:
                # A previous lookup established the profile does not exist.
                return None
            if profile:
                return profile
            profile = PersonalProfile.get_by_key_name(user_id)
            # Cache a miss as NO_OBJECT so repeated lookups stay cheap.
            MemcacheManager.set(
                cls._memcache_key(user_id), profile if profile else NO_OBJECT,
                namespace=cls.TARGET_NAMESPACE)
            return profile
        finally:
            namespace_manager.set_namespace(old_namespace)

    @classmethod
    def delete_profile_by_user_id(cls, user_id):
        """Deletes the global profile entity and its memcache entry."""
        with common_utils.Namespace(cls.TARGET_NAMESPACE):
            PersonalProfile.delete_by_key(user_id)
            MemcacheManager.delete(
                cls._memcache_key(user_id), namespace=cls.TARGET_NAMESPACE)

    @classmethod
    def add_new_profile(cls, user_id, email):
        """Adds new profile for a user_id and returns Entity object."""
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(cls.TARGET_NAMESPACE)

            profile = PersonalProfile(key_name=user_id)
            profile.email = email
            profile.put()
            return profile
        finally:
            namespace_manager.set_namespace(old_namespace)

    @classmethod
    def _update_global_profile_attributes(
            cls, profile,
            email=None, legal_name=None, nick_name=None,
            date_of_birth=None, is_enrolled=None, final_grade=None,
            course_info=None):
        """Modifies various attributes of Student's Global Profile.

        Note: is_enrolled is accepted for signature symmetry with
        _update_attributes but is not stored on the global profile.
        """
        if email is not None:
            profile.email = email.lower()

        if legal_name is not None:
            profile.legal_name = legal_name

        if nick_name is not None:
            profile.nick_name = nick_name

        if date_of_birth is not None:
            profile.date_of_birth = date_of_birth

        # TODO(nretallack): Remove this block and re-calculate this dynamically
        if final_grade is not None or course_info is not None:
            # Defer to avoid circular import.
            from controllers import sites
            course = sites.get_course_for_current_request()
            course_namespace = course.get_namespace_name()

            # course_info is a JSON dict keyed by course namespace; merge the
            # new values for the current course into it.
            course_info_dict = {}
            if profile.course_info:
                course_info_dict = transforms.loads(profile.course_info)
            info = course_info_dict.get(course_namespace, {})
            if final_grade:
                info['final_grade'] = final_grade
            if course_info:
                info['info'] = course_info
            course_info_dict[course_namespace] = info
            profile.course_info = transforms.dumps(course_info_dict)

    @classmethod
    def _update_course_profile_attributes(
            cls, student, nick_name=None, is_enrolled=None, labels=None):
        """Modifies various attributes of Student's Course Profile."""
        if nick_name is not None:
            student.name = nick_name

        if is_enrolled is not None:
            student.is_enrolled = is_enrolled

        if labels is not None:
            student.labels = labels

    @classmethod
    def _update_attributes(
            cls, profile, student,
            email=None, legal_name=None, nick_name=None,
            date_of_birth=None, is_enrolled=None, final_grade=None,
            course_info=None, labels=None):
        """Modifies various attributes of Student and Profile.

        Either argument may be None, in which case that side is skipped.
        Neither entity is put(); callers persist them afterwards.
        """
        if profile:
            cls._update_global_profile_attributes(
                profile, email=email, legal_name=legal_name,
                nick_name=nick_name, date_of_birth=date_of_birth,
                is_enrolled=is_enrolled, final_grade=final_grade,
                course_info=course_info)

        if student:
            cls._update_course_profile_attributes(
                student, nick_name=nick_name, is_enrolled=is_enrolled,
                labels=labels)

    @classmethod
    def _put_profile(cls, profile):
        """Does a put() on profile objects and invalidates the cache entry."""
        if not profile:
            return
        profile.put()
        MemcacheManager.delete(
            cls._memcache_key(profile.user_id),
            namespace=cls.TARGET_NAMESPACE)

    @classmethod
    def get_profile_by_user_id(cls, user_id):
        """Loads profile given a user_id and returns DTO object."""
        profile = cls._get_profile_by_user_id(user_id)
        if profile:
            return PersonalProfileDTO(personal_profile=profile)
        return None

    @classmethod
    def get_profile_by_user(cls, user):
        """Convenience wrapper taking a users.User instead of a user_id."""
        return cls.get_profile_by_user_id(user.user_id())

    @classmethod
    def add_new_student_for_current_user(
            cls, nick_name, additional_fields, handler, labels=None):
        """Registers the current user as a Student and sends the welcome mail.

        A welcome-notification failure is logged but never blocks
        registration.
        """
        user = users.get_current_user()

        student_by_uid = Student.get_by_user_id(user.user_id())
        is_valid_student = (student_by_uid is None or
                            student_by_uid.user_id == user.user_id())
        assert is_valid_student, (
            'Student\'s email and user id do not match.')

        student = cls._add_new_student_for_current_user(
            user.user_id(), user.email(), nick_name, additional_fields, labels)

        try:
            cls._send_welcome_notification(handler, student)
        except Exception, e:  # On purpose. pylint: disable=broad-except
            logging.error(
                'Unable to send welcome notification; error was: ' + str(e))

    @classmethod
    def _add_new_student_for_current_user(
            cls, user_id, email, nick_name, additional_fields, labels=None):
        # Resolve the key_name outside the transaction: legacy students use
        # email as key_name, new ones use user_id (see Student class doc).
        student = Student.get_by_user_id(user_id)
        key_name = None
        if student:
            key_name = student.key().name()
        student = cls._add_new_student_for_current_user_in_txn(
            key_name, user_id, email, nick_name, additional_fields, labels)
        return student

    @classmethod
    @db.transactional(xg=True)
    def _add_new_student_for_current_user_in_txn(
            cls, key_name, user_id, email, nick_name, additional_fields,
            labels=None):
        """Create new or re-enroll old student."""
        # create profile if does not exist
        profile = cls._get_profile_by_user_id(user_id)
        if not profile:
            profile = cls.add_new_profile(user_id, email)

        # create new student or re-enroll existing
        if key_name:
            student = Student.get_by_key_name(key_name)
        else:
            student = Student._add_new(  # pylint: disable=protected-access
                user_id, email)

        # update profile
        cls._update_attributes(
            profile, student, nick_name=nick_name, is_enrolled=True,
            labels=labels)

        # update student
        student.email = email.lower()
        student.additional_fields = additional_fields

        common_utils.run_hooks(cls.STUDENT_CREATION_HOOKS, student, profile)

        # put both
        cls._put_profile(profile)
        student.put()

        # Notify interested modules; transactional so the task only fires if
        # this transaction commits.
        StudentLifecycleObserver.enqueue(
            StudentLifecycleObserver.EVENT_ADD, user_id)
        return student

    @classmethod
    def _send_welcome_notification(cls, handler, student):
        """Sends the (optionally templated) welcome email, if enabled."""
        if not cls._can_send_welcome_notifications(handler):
            return

        if services.unsubscribe.has_unsubscribed(student.email):
            return

        course_settings = handler.app_context.get_environ()['course']
        course_title = course_settings['title']
        sender = cls._get_welcome_notifications_sender(handler)

        assert sender, 'Must set welcome_notifications_sender in course.yaml'

        # Template variables available to the course's custom subject/body.
        context = {
            'student_name': student.name,
            'course_title': course_title,
            'course_url': handler.get_base_href(handler),
            'unsubscribe_url': services.unsubscribe.get_unsubscribe_url(
                handler, student.email)
        }

        if course_settings.get('welcome_notifications_subject'):
            subject = jinja2.Template(unicode(
                course_settings['welcome_notifications_subject']
            )).render(context)
        else:
            subject = 'Welcome to ' + course_title

        if course_settings.get('welcome_notifications_body'):
            body = jinja2.Template(unicode(
                course_settings['welcome_notifications_body']
            )).render(context)
        else:
            # No custom body configured; fall back to the bundled template.
            jinja_environment = handler.app_context.fs.get_jinja_environ(
                [os.path.join(
                    appengine_config.BUNDLE_ROOT, 'views', 'notifications')],
                autoescape=False)
            body = jinja_environment.get_template('welcome.txt').render(context)

        services.notifications.send_async(
            student.email, sender, WELCOME_NOTIFICATION_INTENT,
            body, subject, audit_trail=context,
        )

    @classmethod
    def _can_send_welcome_notifications(cls, handler):
        # Requires both services enabled AND the per-course setting on.
        return (
            services.notifications.enabled() and services.unsubscribe.enabled()
            and cls._get_send_welcome_notifications(handler))

    @classmethod
    def _get_send_welcome_notifications(cls, handler):
        """Reads course setting send_welcome_notifications (default False)."""
        return handler.app_context.get_environ().get(
            'course', {}
        ).get('send_welcome_notifications', False)

    @classmethod
    def _get_welcome_notifications_sender(cls, handler):
        """Reads course setting welcome_notifications_sender (or None)."""
        return handler.app_context.get_environ().get(
            'course', {}
        ).get('welcome_notifications_sender')

    @classmethod
    def get_enrolled_student_by_user_for(cls, user, app_context):
        """Returns student for a specific course."""
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(app_context.get_namespace_name())
            return Student.get_enrolled_student_by_user(user)
        finally:
            namespace_manager.set_namespace(old_namespace)

    @classmethod
    def unregister_user(cls, user_id, unused_timestamp):
        """Marks the student unenrolled; no-op if the student is gone."""
        student = Student.get_by_user_id(user_id)
        if not student:
            logging.info(
                'Unregister commanded for user %s, but user already gone.',
                user_id)
            return
        cls.update(user_id, None, is_enrolled=False)

    @classmethod
    def update(
            cls, user_id, email, legal_name=None, nick_name=None,
            date_of_birth=None, is_enrolled=None, final_grade=None,
            course_info=None, labels=None, profile_only=False):
        """Updates the Student and/or global profile for user_id.

        With profile_only=True only the global profile is touched; otherwise
        the Student must exist or the transaction raises.
        """
        # Resolve the key_name outside the transaction (legacy students use
        # email as key_name; see Student class doc).
        student = Student.get_by_user_id(user_id)
        key_name = None
        if student:
            key_name = student.key().name()
        cls._update_in_txn(
            key_name, user_id, email, legal_name, nick_name, date_of_birth,
            is_enrolled, final_grade, course_info, labels, profile_only)

    @classmethod
    @db.transactional(xg=True)
    def _update_in_txn(
            cls, key_name, user_id, email, legal_name=None, nick_name=None,
            date_of_birth=None, is_enrolled=None, final_grade=None,
            course_info=None, labels=None, profile_only=False):
        """Updates a student and/or their global profile."""
        student = None
        if not profile_only:
            if key_name:
                student = Student.get_by_key_name(key_name)
            if not student:
                raise Exception('Unable to find student for: %s' % email)

        profile = cls._get_profile_by_user_id(user_id)
        if not profile:
            profile = cls.add_new_profile(user_id, email)

        # Notify modules when the enrollment state actually changes;
        # transactional, so the task only fires if this txn commits.
        if (student and is_enrolled is not None and
            student.is_enrolled != is_enrolled):
            if is_enrolled:
                event = StudentLifecycleObserver.EVENT_REENROLL
            else:
                event = StudentLifecycleObserver.EVENT_UNENROLL
            StudentLifecycleObserver.enqueue(event, student.user_id)

        cls._update_attributes(
            profile, student, email=email, legal_name=legal_name,
            nick_name=nick_name, date_of_birth=date_of_birth,
            is_enrolled=is_enrolled, final_grade=final_grade,
            course_info=course_info, labels=labels)

        cls._put_profile(profile)
        if not profile_only:
            student.put()
class StudentCache(caching.RequestScopedSingleton):
    """Request-scoped cache of Student entities, keyed by namespace+user_id.

    Negative lookups are cached too (a None value), so repeated misses within
    one request do not hit the datastore again.
    """

    def __init__(self):
        self._key_name_to_student = {}

    @classmethod
    def _key(cls, user_id):
        """Make key specific to user_id and current namespace."""
        return '%s-%s' % (MemcacheManager.get_namespace(), user_id)

    def _get_by_user_id_from_datastore(self, user_id):
        """Load Student by user_id. Fail if user_id is not unique."""
        # Post-1.8 students use user_id as key_name; check that first, then
        # fall back to a query on the user_id field for legacy entities
        # (which were keyed by email).
        student = Student.get_by_key_name(user_id)
        if student:
            return student
        # Fetch up to two so non-uniqueness can be detected.
        matches = Student.all().filter(
            Student.user_id.name, user_id).fetch(limit=2)
        if len(matches) > 1:
            raise Exception(
                'There is more than one student with user_id "%s"' % user_id)
        return matches[0] if matches else None

    def _get_by_user_id(self, user_id):
        """Get cached Student with user_id or load one from datastore."""
        cache_key = self._key(user_id)
        try:
            return self._key_name_to_student[cache_key]
        except KeyError:
            pass
        student = self._get_by_user_id_from_datastore(user_id)
        self._key_name_to_student[cache_key] = student
        return student

    def _remove(self, user_id):
        """Remove cached value by user_id."""
        self._key_name_to_student.pop(self._key(user_id), None)

    @classmethod
    def remove(cls, user_id):
        # pylint: disable=protected-access
        return cls.instance()._remove(user_id)

    @classmethod
    def get_by_user_id(cls, user_id):
        # pylint: disable=protected-access
        return cls.instance()._get_by_user_id(user_id)
class _EmailProperty(db.StringProperty):
    """Class that provides dual look up of email property value.

    For legacy Students (CB <= 1.8) the email was stored as the entity
    key_name and the email field itself was never set; this descriptor
    transparently falls back to the key_name for those entities.
    """

    def __get__(self, model_instance, model_class):
        # Accessed on the class itself (not an instance): return the
        # descriptor, per the standard property protocol.
        if model_instance is None:
            return self

        # Try to get and use the actual stored value.
        value = super(_EmailProperty, self).__get__(model_instance, model_class)
        if value:
            return value

        # In CB 1.8 and before email was used as a key to Student entity. This
        # is no longer true. Here we provide the backwards compatibility for the
        # legacy Student instances. If user_id and key.name match, it means that
        # the user_id is a key and we should not use key as email and email is
        # not set. If key.name and user_id don't match, the key is also an
        # email (legacy entity), so return the key_name as the email value.
        if model_instance.is_saved():
            user_id = model_instance.user_id
            key_name = model_instance.key().name()
            if key_name != user_id:
                return key_name
        # Unsaved entity, or a modern entity with no email stored.
        return None
class _ReadOnlyStringProperty(db.StringProperty):
    """String property that may be written once and never changed after."""

    def __set__(self, model_instance, value):
        if model_instance:
            incoming = self.validate(value)
            existing = self.get_value_for_datastore(model_instance)
            # Re-assigning the same value is permitted; changing a previously
            # stored, truthy value is not.
            if existing and existing != incoming:
                raise ValueError(
                    'Unable to change set once read-only property '
                    '%s.' % self.name)
        super(_ReadOnlyStringProperty, self).__set__(model_instance, value)
class Student(BaseEntity):
"""Student data specific to a course instance.
This entity represents a student in a specific course. It has somewhat
complex key_name/user_id/email behavior that comes from the legacy mistakes.
Current and historical behavior of this class is documented in detail below.
Let us start with the historical retrospect. In CB 1.8 and below:
- key_name
- is email for new users
- email
- is used as key_name
- is unique
- is immutable
- user_id
- was introduced in CB 1.2.0
- was an independent field and not a key_name
- held google.appengine.api.users.get_current_user().user_id()
- mutations were not explicitly prevented, but no mutations are
known to have occurred
- was used as foreign key in other tables
Anyone who attempts federated identity will find use of email as key_name
completely unacceptable. So we decided to make user_id a key_name.
The ideal solution would have been:
- store user_id as key_name for new and legacy users
- store email as an independent mutable field
The ideal solution was rejected upon discussion. It required taking course
offline and running M/R or ETL job to modify key_name. All foreign key
relationships that used old key_name value would need to be fixed up to use
new value. ETL would also need to be aware of different key structure before
and after CB 1.8. All in all the ideal approach is complex, invasive and
error prone. It was ultimately rejected.
The currently implemented solution is:
- key_name
- == user_id for new users
- == email for users created in CB 1.8 and below
- user_id
- is used as key_name
- is immutable independent field
- holds google.appengine.api.users.get_current_user().user_id()
- historical convention is to use user_id as foreign key in other
tables
- email
- is an independent field
- is mutable
This solution is a bit complex, but provides all of the behaviors of the
ideal solution. It is 100% backwards compatible and does not require offline
upgrade step. For example, if student, who registered and unregistered in
CB 1.8 or below, now re-registers, we will reactivate his original student
entry thus keeping all prior progress (modulo data-removal policy).
The largest new side effects is:
- there may be several users with the same email in one course
If email uniqueness is desired it needs to be added on separately.
We automatically execute core CB functional tests under both the old and the
new key_name logic. This is done in LegacyEMailAsKeyNameTest. The exact
interplay of key_name/user_id/email is tested in StudentKeyNameTest. We also
manually ran the entire test suite with _LEGACY_EMAIL_AS_KEY_NAME_ENABLED
set to True and all test passed, except test_rpc_performance in the class
tests.functional.modules_i18n_dashboard.SampleCourseLocalizationTest. This
failure is expected as we have extra datastore lookup in get() by email.
We did not optimize get() by email RPC performance as this call is used
rarely. To optimize dual datastore lookup in get_by_user_id() we tried
memcache and request-scope cache for Student. Request-scoped cache provided
much better results and this is what we implemented.
We are confident that core CB components, including peer review system, use
user_id as foreign key and will continue working with no changes. Any custom
components that used email as foreign key they will stop working and will
require modifications and an upgrade step.
TODO(psimakov):
- review how email changes propagate between global and per-course
namespaces
Good luck!
"""
enrolled_on = db.DateTimeProperty(auto_now_add=True, indexed=True)
user_id = _ReadOnlyStringProperty(indexed=True)
email = _EmailProperty(indexed=True)
name = db.StringProperty(indexed=False)
additional_fields = db.TextProperty(indexed=False)
is_enrolled = db.BooleanProperty(indexed=False)
last_seen_on = db.DateTimeProperty(indexed=True)
scores = db.TextProperty(indexed=False)
labels = db.StringProperty(indexed=False)
group_id = db.IntegerProperty(indexed=True)
_LEGACY_EMAIL_AS_KEY_NAME_ENABLED = False
_PROPERTY_EXPORT_BLACKLIST = [
additional_fields,
args):
super(Student, self).__init__(*args, **kwargs)
self._federated_email_cached = False
self._federated_email_value = None
@classmethod
def safe_key(cls, db_key, transform_fn):
return db.Key.from_path(cls.kind(), transform_fn(db_key.id_or_name()))
@classmethod
def _add_new(cls, user_id, email):
if cls._LEGACY_EMAIL_AS_KEY_NAME_ENABLED:
return Student(key_name=email, email=email, user_id=user_id)
else:
return Student(
key_name=user_id, email=email.lower(), user_id=user_id)
    def for_export(self, transform_fn):
        """Creates an ExportEntity populated from this entity instance."""
        # Guard against a real property ever shadowing the synthesized field.
        assert not hasattr(self, 'key_by_user_id')
        model = super(Student, self).for_export(transform_fn)
        model.user_id = transform_fn(self.user_id)
        # Add a version of the key that always uses the user_id for the name
        # component. This can be used to establish relationships between objects
        # where the student key used was created via get_key(). In general,
        # this means clients will join exports on this field, not the field made
        # from safe_key().
        model.key_by_user_id = self.get_key(transform_fn=transform_fn)
        return model
    @property
    def federated_email(self):
        """Gets the federated email address of the student.

        This always returns None unless federated authentication is enabled and
        the federated authentication implementation implements an email
        resolver. See common.users.FederatedEmailResolver.

        The resolver result is cached per entity instance (see __init__), so
        repeated reads perform at most one resolver call.
        """
        if not self._federated_email_cached:
            manager = users.UsersServiceManager.get()
            resolver = manager.get_federated_email_resolver_class()
            assert resolver
            # Unsaved entities may have no user_id yet; resolve to None then.
            self._federated_email_value = (
                resolver.get(self.user_id) if self.user_id else None)
            self._federated_email_cached = True
        return self._federated_email_value
    @property
    def is_transient(self):
        # A datastore-backed Student is never transient; presumably a
        # transient/anonymous counterpart elsewhere returns True so callers
        # can branch on this uniformly — confirm against its definition.
        return False
    @property
    def profile(self):
        # Fetches the cross-course PersonalProfile DTO (may be None); not
        # cached here — each access goes through the DAO.
        return StudentProfileDAO.get_profile_by_user_id(self.user_id)
    def put(self):
        """Do the normal put() and also invalidate the cached copy."""
        # Evict before writing so a stale cached Student cannot outlive
        # this update.
        StudentCache.remove(self.user_id)
        return super(Student, self).put()
    def delete(self):
        """Do the normal delete() and also remove the object from cache."""
        StudentCache.remove(self.user_id)
        super(Student, self).delete()
    @classmethod
    def add_new_student_for_current_user(
            cls, nick_name, additional_fields, handler, labels=None):
        """Delegates registration of the current user to StudentProfileDAO."""
        StudentProfileDAO.add_new_student_for_current_user(
            nick_name, additional_fields, handler, labels)
@classmethod
def get_first_by_email(cls, email):
"""Get the first student matching requested email.
Returns:
A tuple: (Student, unique). The first value is the first student
object with requested email. The second value is set to True if only
exactly one student has this email on record; False otherwise.
"""
# In the CB 1.8 and below email was the key_name. This is no longer
# true. To support legacy Student entities do a double look up here:
# first by the key_name value and then by the email field value.
student = cls.get_by_key_name(email)
if student:
return (student, True)
students = cls.all().filter(cls.email.name, email).fetch(limit=2)
if not students:
return (None, False)
return (students[0], len(students) == 1)
    @classmethod
    def get_by_user(cls, user):
        """Looks up the Student for a users.User via its user_id."""
        return cls.get_by_user_id(user.user_id())
@classmethod
def get_enrolled_student_by_user(cls, user):
"""Returns enrolled student or None."""
student = cls.get_by_user_id(user.user_id())
if student and student.is_enrolled:
return student
return None
@classmethod
def is_email_in_use(cls, email):
"""Checks if an email is in use by one of existing student."""
if cls.all().filter(cls.email.name, email).fetch(limit=1):
return True
return False
@classmethod
def _get_user_and_student(cls):
"""Loads user and student and asserts both are present."""
user = users.get_current_user()
if not user:
raise Exception('No current user.')
student = Student.get_by_user(user)
if not student:
raise Exception('Student instance corresponding to user_id %s not '
'found.' % user.user_id())
return user, student
    @classmethod
    def rename_current(cls, new_name):
        """Gives student a new name.

        Raises:
            Exception: if there is no current user or no matching Student.
        """
        _, student = cls._get_user_and_student()
        StudentProfileDAO.update(
            student.user_id, student.email, nick_name=new_name)
    @classmethod
    def set_enrollment_status_for_current(cls, is_enrolled):
        """Changes student enrollment status.

        Raises:
            Exception: if there is no current user or no matching Student.
        """
        _, student = cls._get_user_and_student()
        StudentProfileDAO.update(
            student.user_id, student.email, is_enrolled=is_enrolled)
    @classmethod
    def set_labels_for_current(cls, labels):
        """Set labels for tracks on the student.

        Raises:
            Exception: if there is no current user or no matching Student.
        """
        _, student = cls._get_user_and_student()
        StudentProfileDAO.update(
            student.user_id, student.email, labels=labels)
def get_key(self, transform_fn=None):
"""Gets a version of the key that uses user_id for the key name."""
if not self.user_id:
raise Exception('Student instance has no user_id set.')
user_id = transform_fn(self.user_id) if transform_fn else self.user_id
return db.Key.from_path(Student.kind(), user_id)
    @classmethod
    def get_by_user_id(cls, user_id):
        """Get object from datastore with the help of cache."""
        # Served via StudentCache; presumably it falls back to the datastore
        # on a miss — see StudentCache for the exact policy.
        return StudentCache.get_by_user_id(user_id)
@classmethod
def delete_by_user_id(cls, user_id):
student = cls.get_by_user_id(user_id)
if student:
student.delete()
def has_same_key_as(self, key):
"""Checks if the key of the student and the given key are equal."""
return key == self.get_key()
def get_labels_of_type(self, label_type):
if not self.labels:
return set()
label_ids = LabelDAO.get_set_of_ids_of_type(label_type)
return set([int(label) for label in
common_utils.text_to_list(self.labels)
if int(label) in label_ids])
    def update_last_seen_on(self, now=None, value=None):
        """Updates last_seen_on.

        Args:
            now: datetime.datetime.utcnow or None. Injectable for tests only.
            value: datetime.datetime.utcnow or None. Injectable for tests only.
        """
        now = now if now is not None else datetime.datetime.utcnow()
        value = value if value is not None else now
        # Throttled: only writes when the last recorded sighting is older
        # than STUDENT_LAST_SEEN_ON_UPDATE_SEC.
        if self._should_update_last_seen_on(value):
            self.last_seen_on = value
            self.put()
            # put() above already evicts the cache entry; this second
            # removal is redundant but harmless.
            StudentCache.remove(self.user_id)
def _should_update_last_seen_on(self, value):
if self.last_seen_on is None:
return True
return (
(value - self.last_seen_on).total_seconds() >
STUDENT_LAST_SEEN_ON_UPDATE_SEC)
class TransientStudent(object):
    """A transient student (i.e. a user who hasn't logged in or registered)."""

    @property
    def is_transient(self):
        # By definition: this object only exists for unauthenticated users.
        return True

    @property
    def is_enrolled(self):
        # An anonymous visitor can never be enrolled.
        return False

    @property
    def scores(self):
        # No persisted scores exist for an anonymous visitor.
        return {}
class EventEntity(BaseEntity):
    """Generic events.

    Each event has a 'source' that defines a place in a code where the event was
    recorded. Each event has a 'user_id' to represent an actor who triggered
    the event. The event 'data' is a JSON object, the format of which is defined
    elsewhere and depends on the type of the event.

    When extending this class, be sure to register your new class with
    models.data_removal.Registry so that instances can be cleaned up on user
    un-registration.
    """
    recorded_on = db.DateTimeProperty(auto_now_add=True, indexed=True)
    source = db.StringProperty(indexed=False)
    user_id = db.StringProperty(indexed=False)
    data = db.TextProperty(indexed=False)

    # Callables invoked for every recorded event, outside any transaction;
    # signature: listener(source, user, data_dict).
    EVENT_LISTENERS = []

    @classmethod
    @db.non_transactional
    def _run_record_hooks(cls, source, user, data_dict):
        """Runs all listeners; a failing listener is logged, never fatal."""
        for listener in cls.EVENT_LISTENERS:
            try:
                listener(source, user, data_dict)
            except Exception:
                logging.exception(
                    'Event record hook failed: %s, %s, %s',
                    source, user.user_id(), data_dict)

    @classmethod
    def record(cls, source, user, data, user_id=None):
        """Records new event into a datastore.

        Args:
            source: string. Place in code where the event originated.
            user: users.User. Actor who triggered the event.
            data: string. JSON-encoded payload.
            user_id: string or None. Overrides user.user_id() when set.
        """
        data_dict = transforms.loads(data)
        cls._run_record_hooks(source, user, data_dict)
        # Re-serialize: listeners may have mutated the dict in place.
        data = transforms.dumps(data_dict)
        event = cls()
        event.source = source
        event.user_id = user_id if user_id else user.user_id()
        event.data = data
        event.put()

    def for_export(self, transform_fn):
        """Returns an ExportEntity with user_id obfuscated via transform_fn."""
        model = super(EventEntity, self).for_export(transform_fn)
        model.user_id = transform_fn(self.user_id)
        return model

    def get_user_ids(self):
        """Returns the user ids referenced by this event."""
        return [self.user_id]
class StudentAnswersEntity(BaseEntity):
    """Student answers to the assessments."""

    updated_on = db.DateTimeProperty(indexed=True)
    # JSON blob of answers; the schema is defined by callers, not here.
    data = db.TextProperty(indexed=False)

    @classmethod
    def safe_key(cls, db_key, transform_fn):
        """Returns a copy of db_key with its id/name obfuscated."""
        return db.Key.from_path(cls.kind(), transform_fn(db_key.id_or_name()))
class StudentPropertyEntity(BaseEntity):
    """A property of a student, keyed by the string STUDENT_ID-PROPERTY_NAME.

    When extending this class, be sure to register your new class with
    models.data_removal.Registry so that instances can be cleaned up on user
    un-registration. See an example of how to do that at the bottom of this
    file.
    """
    updated_on = db.DateTimeProperty(indexed=True)
    name = db.StringProperty()
    value = db.TextProperty()

    @classmethod
    def _memcache_key(cls, key):
        """Makes a memcache key from primary key."""
        return 'entity:student_property:%s' % key

    @classmethod
    def create_key(cls, student_id, property_name):
        """Builds the composite 'STUDENT_ID-PROPERTY_NAME' key string."""
        return '%s-%s' % (student_id, property_name)

    @classmethod
    def create(cls, student, property_name):
        """Instantiates (but does not save) a property row for the student."""
        return cls(
            key_name=cls.create_key(student.user_id, property_name),
            name=property_name)

    @classmethod
    def safe_key(cls, db_key, transform_fn):
        """Obfuscates only the user_id half of the composite key."""
        # Split at the first '-' only — assumes user ids contain no '-';
        # TODO(review): confirm this invariant for all auth providers.
        user_id, name = db_key.name().split('-', 1)
        return db.Key.from_path(
            cls.kind(), '%s-%s' % (transform_fn(user_id), name))

    def put(self):
        """Do the normal put() and also add the object to memcache."""
        result = super(StudentPropertyEntity, self).put()
        MemcacheManager.set(self._memcache_key(self.key().name()), self)
        return result

    def delete(self):
        """Do the normal delete() and also remove the object from memcache."""
        super(StudentPropertyEntity, self).delete()
        MemcacheManager.delete(self._memcache_key(self.key().name()))

    @classmethod
    def get(cls, student, property_name):
        """Loads student property."""
        key = cls.create_key(student.user_id, property_name)
        value = MemcacheManager.get(cls._memcache_key(key))
        # NO_OBJECT is a cached negative result: the row is known absent.
        if NO_OBJECT == value:
            return None
        if not value:
            value = cls.get_by_key_name(key)
            if value:
                MemcacheManager.set(cls._memcache_key(key), value)
            else:
                # Cache the absence too, so repeated misses skip the
                # datastore.
                MemcacheManager.set(cls._memcache_key(key), NO_OBJECT)
        return value
class BaseJsonDao(object):
    """Base DAO class for entities storing their data in a single JSON blob."""

    class EntityKeyTypeId(object):
        # Key strategy for entities addressed by an integral datastore id.

        @classmethod
        def get_entity_by_key(cls, entity_class, key):
            return entity_class.get_by_id(int(key))

        @classmethod
        def new_entity(cls, entity_class, unused_key):
            # The datastore assigns the id on first put(); key is ignored.
            return entity_class()

    class EntityKeyTypeName(object):
        # Key strategy for entities addressed by a string key_name.

        @classmethod
        def get_entity_by_key(cls, entity_class, key):
            return entity_class.get_by_key_name(key)

        @classmethod
        def new_entity(cls, entity_class, key_name):
            return entity_class(key_name=key_name)

    @classmethod
    def _memcache_key(cls, obj_id):
        """Makes a memcache key from datastore id."""
        return '(entity:%s:%s)' % (cls.ENTITY.kind(), obj_id)

    @classmethod
    def _memcache_all_key(cls):
        """Makes a memcache key for caching get_all()."""
        return '(entity-get-all:%s)' % cls.ENTITY.kind()

    @classmethod
    def get_all_mapped(cls):
        """Returns {id: DTO} for every row, cached as one memcache value."""
        entities = MemcacheManager.get(cls._memcache_all_key())
        if entities is not None and entities != NO_OBJECT:
            # Cache hit: re-run post-load hooks on the cached DTOs.
            cls._maybe_apply_post_load_hooks(entities.itervalues())
            return entities
        result = {dto.id: dto for dto in cls.get_all_iter()}
        # An empty table is cached as NO_OBJECT so absence is cached too.
        result_to_cache = NO_OBJECT
        if result:
            result_to_cache = result
        MemcacheManager.set(cls._memcache_all_key(), result_to_cache)
        cls._maybe_apply_post_load_hooks(result.itervalues())
        return result

    @classmethod
    def get_all(cls):
        """Returns all DTOs as a list (order unspecified)."""
        return cls.get_all_mapped().values()

    @classmethod
    def get_all_iter(cls):
        """Return a generator that will produce all DTOs of a given type.

        Yields:
            A DTO for each row in the Entity type's table.
        """
        prev_cursor = None
        any_records = True
        # Page through the table with datastore cursors until one pass
        # produces no rows.
        while any_records:
            any_records = False
            query = cls.ENTITY.all().with_cursor(prev_cursor)
            for entity in query.run():
                any_records = True
                yield cls.DTO(entity.key().id_or_name(),
                              transforms.loads(entity.data))
            prev_cursor = query.cursor()

    @classmethod
    def _maybe_apply_post_load_hooks(cls, dto_list):
        """Run any post-load processing hooks.

        Modules may insert post-load processing hooks (e.g. for i18n
        translation) into the list POST_LOAD_HOOKS defined on the DAO class.
        If the class has this list and any hook functions are present, they
        are passed the list of DTO's for in-place processing.

        Args:
            dto_list: list of DTO objects
        """
        if hasattr(cls, 'POST_LOAD_HOOKS'):
            for hook in cls.POST_LOAD_HOOKS:
                hook(dto_list)

    @classmethod
    def _maybe_apply_post_save_hooks(cls, dto_and_id_list):
        """Run any post-save processing hooks.

        Modules may insert post-save processing hooks (e.g. for i18n
        translation) into the list POST_SAVE_HOOKS defined on the DAO class.
        If the class has this list and any hook functions are present, they
        are passed the list of DTO's for in-place processing.

        Args:
            dto_and_id_list: list of pairs of (id, DTO) objects
        """
        # Hand hooks fresh DTOs so they cannot mutate the caller's copies.
        dto_list = [
            cls.DTO(dto_id, orig_dto.dict)
            for dto_id, orig_dto in dto_and_id_list]
        if hasattr(cls, 'POST_SAVE_HOOKS'):
            common_utils.run_hooks(cls.POST_SAVE_HOOKS, dto_list)

    @classmethod
    def _load_entity(cls, obj_id):
        """Loads one raw entity, using NO_OBJECT negative caching."""
        if not obj_id:
            return None
        memcache_key = cls._memcache_key(obj_id)
        entity = MemcacheManager.get(memcache_key)
        if NO_OBJECT == entity:
            return None
        if not entity:
            entity = cls.ENTITY_KEY_TYPE.get_entity_by_key(cls.ENTITY, obj_id)
            if entity:
                MemcacheManager.set(memcache_key, entity)
            else:
                MemcacheManager.set(memcache_key, NO_OBJECT)
        return entity

    @classmethod
    def load(cls, obj_id):
        """Loads one DTO by id, or None when absent."""
        entity = cls._load_entity(obj_id)
        if entity:
            dto = cls.DTO(obj_id, transforms.loads(entity.data))
            cls._maybe_apply_post_load_hooks([dto])
            return dto
        else:
            return None

    @classmethod
    @appengine_config.timeandlog('Models.bulk_load')
    def bulk_load(cls, obj_id_list):
        """Loads many DTOs; result aligns with obj_id_list, None for missing."""
        # fetch from memcache
        memcache_keys = [cls._memcache_key(obj_id) for obj_id in obj_id_list]
        memcache_entities = MemcacheManager.get_multi(memcache_keys)
        # fetch missing from datastore
        both_keys = zip(obj_id_list, memcache_keys)
        datastore_keys = [
            obj_id for obj_id, memcache_key in both_keys
            if memcache_key not in memcache_entities]
        if datastore_keys:
            datastore_entities = dict(zip(
                datastore_keys, get([
                    db.Key.from_path(cls.ENTITY.kind(), obj_id)
                    for obj_id in datastore_keys])))
        else:
            datastore_entities = {}
        # weave the results together
        ret = []
        memcache_update = {}
        dtos_for_post_hooks = []
        for obj_id, memcache_key in both_keys:
            entity = datastore_entities.get(obj_id)
            if entity is not None:
                # Fresh from datastore: build DTO and schedule cache fill.
                dto = cls.DTO(obj_id, transforms.loads(entity.data))
                ret.append(dto)
                dtos_for_post_hooks.append(dto)
                memcache_update[memcache_key] = entity
            elif memcache_key not in memcache_entities:
                # Missed both caches and datastore: cache the absence.
                ret.append(None)
                memcache_update[memcache_key] = NO_OBJECT
            else:
                entity = memcache_entities[memcache_key]
                if NO_OBJECT == entity:
                    ret.append(None)
                else:
                    ret.append(cls.DTO(obj_id, transforms.loads(entity.data)))
        # run hooks
        cls._maybe_apply_post_load_hooks(dtos_for_post_hooks)
        # put into memcache
        if datastore_entities:
            MemcacheManager.set_multi(memcache_update)
        return ret

    @classmethod
    def _create_if_necessary(cls, dto):
        """Returns the backing entity for dto, creating one if none exists."""
        entity = cls._load_entity(dto.id)
        if not entity:
            entity = cls.ENTITY_KEY_TYPE.new_entity(cls.ENTITY, dto.id)
        entity.data = transforms.dumps(dto.dict)
        return entity

    @classmethod
    def before_put(cls, dto, entity):
        # Subclass hook: runs just before each entity is written.
        pass

    @classmethod
    def save(cls, dto):
        """Persists one DTO; returns its datastore id or name."""
        entity = cls._create_if_necessary(dto)
        cls.before_put(dto, entity)
        entity.put()
        # The list-of-all cache is now stale; the per-object cache can be
        # refreshed directly.
        MemcacheManager.delete(cls._memcache_all_key())
        id_or_name = entity.key().id_or_name()
        MemcacheManager.set(cls._memcache_key(id_or_name), entity)
        cls._maybe_apply_post_save_hooks([(id_or_name, dto)])
        return id_or_name

    @classmethod
    def save_all(cls, dtos):
        """Performs a block persist of a list of DTO's."""
        entities = []
        for dto in dtos:
            entity = cls._create_if_necessary(dto)
            entities.append(entity)
            cls.before_put(dto, entity)
        keys = put(entities)
        MemcacheManager.delete(cls._memcache_all_key())
        for key, entity in zip(keys, entities):
            MemcacheManager.set(cls._memcache_key(key.id_or_name()), entity)
        id_or_name_list = [key.id_or_name() for key in keys]
        cls._maybe_apply_post_save_hooks(zip(id_or_name_list, dtos))
        return id_or_name_list

    @classmethod
    def delete(cls, dto):
        """Deletes the backing entity and invalidates both cache entries."""
        entity = cls._load_entity(dto.id)
        entity.delete()
        MemcacheManager.delete(cls._memcache_all_key())
        MemcacheManager.delete(cls._memcache_key(entity.key().id_or_name()))

    @classmethod
    def clone(cls, dto):
        """Returns an unsaved deep copy of dto (id cleared)."""
        return cls.DTO(None, copy.deepcopy(dto.dict))
class LastModifiedJsonDao(BaseJsonDao):
    """Base DAO that updates the last_modified field of entities on every save.

    DTOs managed by this DAO must have a settable field last_modified defined.
    """

    @classmethod
    def save(cls, dto):
        """Stamps the DTO with the current time, then persists it."""
        dto.last_modified = time.time()
        return super(LastModifiedJsonDao, cls).save(dto)

    @classmethod
    def save_all(cls, dtos):
        """Stamps every DTO with the current time, then bulk-persists them."""
        for dto in dtos:
            dto.last_modified = time.time()
        return super(LastModifiedJsonDao, cls).save_all(dtos)
class QuestionEntity(BaseEntity):
    """An object representing a top-level question."""

    # Single JSON blob holding the entire question definition.
    data = db.TextProperty(indexed=False)
class QuestionDTO(object):
    """DTO wrapping the JSON dict of a single question."""

    # Values stored under the 'type' key.
    MULTIPLE_CHOICE = 0
    SHORT_ANSWER = 1

    def __init__(self, the_id, the_dict):
        self.id = the_id
        self.dict = the_dict

    @property
    def type(self):
        """Question type code, or None when unset."""
        return self.dict.get('type')

    @type.setter
    def type(self, value):
        self.dict['type'] = value

    @property
    def description(self):
        """Unique human-readable description; '' when unset."""
        stored = self.dict.get('description')
        return stored if stored else ''

    @description.setter
    def description(self, value):
        self.dict['description'] = value

    @property
    def last_modified(self):
        """Timestamp of last save; '' when never stamped."""
        stored = self.dict.get('last_modified')
        return stored if stored else ''

    @last_modified.setter
    def last_modified(self, value):
        self.dict['last_modified'] = value
class QuestionDAO(LastModifiedJsonDao):
    """DAO for individual questions stored as JSON blobs."""

    VERSION = '1.5'
    DTO = QuestionDTO
    ENTITY = QuestionEntity
    ENTITY_KEY_TYPE = BaseJsonDao.EntityKeyTypeId
    POST_LOAD_HOOKS = []
    POST_SAVE_HOOKS = []

    @classmethod
    def used_by(cls, question_id):
        """Returns the question groups using a question.

        Args:
            question_id: int. Identifier of the question we're testing.

        Returns:
            List of question groups. The list of all question groups that use
            the given question.
        """
        # O(num_question_groups), but deserialization of 1 large group takes
        # ~1ms so practically speaking latency is OK for the admin console.
        target = long(question_id)
        matches = []
        for group in QuestionGroupDAO.get_all():
            # Repeat the group once per occurrence of the question in it.
            occurrences = [long(x) for x in group.question_ids].count(target)
            matches.extend([group] * occurrences)
        return matches

    @classmethod
    def create_question(cls, question_dict, question_type):
        """Wraps the dict in a DTO, tags its type, and persists it."""
        dto = cls.DTO(None, question_dict)
        dto.type = question_type
        return cls.save(dto)

    @classmethod
    def get_questions_descriptions(cls):
        """Returns the set of descriptions across all stored questions."""
        return {q.description for q in cls.get_all()}

    @classmethod
    def validate_unique_description(cls, description):
        """Raises CollisionError when the description is already taken."""
        if description in cls.get_questions_descriptions():
            raise CollisionError(
                'Non-unique question description: %s' % description)
        return None
class QuestionImporter(object):
    """Helper class for converting ver. 1.2 questions to ver. 1.3 ones."""

    @classmethod
    def _gen_description(cls, unit, lesson_title, question_number):
        """Builds the unique description identifying an imported question."""
        # NOTE(review): this format string was truncated in the source after
        # '(question'; reconstructed to match the three format arguments.
        return (
            'Unit "%s", lesson "%s" (question #%s)' % (
                unit.title, lesson_title, question_number))

    @classmethod
    def import_freetext(cls, question, description, task):
        """Converts a ver. 1.2 freetext question into a short-answer dict.

        Raises:
            CollisionError: if the description is already in use.
            ValidationError: if a required key is missing from the question.
        """
        QuestionDAO.validate_unique_description(description)
        try:
            response = question.get('correctAnswerRegex')
            # Regex /.*/ is added as a guard for questions with no answer.
            response = response.value if response else '/.*/'
            return {
                'version': QuestionDAO.VERSION,
                'description': description,
                'question': task,
                'hint': question['showAnswerOutput'],
                'graders': [{
                    'score': 1.0,
                    'matcher': 'regex',
                    'response': response,
                    'feedback': question.get('correctAnswerOutput', '')
                }],
                'defaultFeedback': question.get('incorrectAnswerOutput', '')}
        except KeyError as e:
            raise ValidationError('Invalid question: %s, %s' % (description, e))

    @classmethod
    def import_question(
            cls, question, unit, lesson_title, question_number, task):
        """Imports one ver. 1.2 question; returns (id, new instance id).

        Dispatches on the legacy 'questionType' field; raises ValueError for
        an unrecognized type.
        """
        question_type = question['questionType']
        task = ''.join(task)
        description = cls._gen_description(unit, lesson_title, question_number)
        if question_type == 'multiple choice':
            question_dict = cls.import_multiple_choice(
                question, description, task)
            qid = QuestionDAO.create_question(
                question_dict, QuestionDAO.DTO.MULTIPLE_CHOICE)
        elif question_type == 'freetext':
            question_dict = cls.import_freetext(question, description, task)
            qid = QuestionDAO.create_question(
                question_dict, QuestionDTO.SHORT_ANSWER)
        elif question_type == 'multiple choice group':
            question_group_dict = cls.import_multiple_choice_group(
                question, description, unit, lesson_title, question_number,
                task)
            qid = QuestionGroupDAO.create_question_group(question_group_dict)
        else:
            raise ValueError('Unknown question type: %s' % question_type)
        return (qid, common_utils.generate_instance_id())

    @classmethod
    def import_multiple_choice(cls, question, description, task):
        """Converts a ver. 1.2 multiple-choice question into a 1.5 dict."""
        QuestionDAO.validate_unique_description(description)
        task = ''.join(task) if task else ''
        qu_dict = {
            'version': QuestionDAO.VERSION,
            'description': description,
            'question': task,
            'multiple_selections': False,
            'choices': [
                {
                    'text': choice[0],
                    'score': 1.0 if choice[1].value else 0.0,
                    'feedback': choice[2]
                } for choice in question['choices']]}
        # Add optional fields
        if 'defaultFeedback' in question:
            qu_dict['defaultFeedback'] = question['defaultFeedback']
        if 'permute_choices' in question:
            qu_dict['permute_choices'] = question['permute_choices']
        if 'show_answer_when_incorrect' in question:
            qu_dict['show_answer_when_incorrect'] = (
                question['show_answer_when_incorrect'])
        if 'all_or_nothing_grading' in question:
            qu_dict['all_or_nothing_grading'] = (
                question['all_or_nothing_grading'])
        return qu_dict

    @classmethod
    def import_multiple_choice_group(
            cls, group, description, unit, lesson_title, question_number, task):
        """Import a 'multiple choice group' as a question group."""
        QuestionGroupDAO.validate_unique_description(description)
        question_group_dict = {
            'version': QuestionDAO.VERSION,
            'description': description,
            'introduction': task}
        question_list = []
        for index, question in enumerate(group['questionsList']):
            # NOTE(review): this format string was truncated in the source
            # after '(question'; reconstructed to match the four arguments.
            description = (
                'Unit "%s", lesson "%s" (question #%s, part #%s)'
                % (unit.title, lesson_title, question_number, index + 1))
            question_dict = cls.import_multiple_choice_group_question(
                question, description)
            question = QuestionDTO(None, question_dict)
            question.type = QuestionDTO.MULTIPLE_CHOICE
            question_list.append(question)
        qid_list = QuestionDAO.save_all(question_list)
        # All member questions carry equal weight.
        question_group_dict['items'] = [{
            'question': quid,
            'weight': 1.0} for quid in qid_list]
        return question_group_dict

    @classmethod
    def import_multiple_choice_group_question(cls, orig_question, description):
        """Import the questions from a group as individual questions."""
        QuestionDAO.validate_unique_description(description)
        # TODO(jorr): Handle allCorrectOutput and someCorrectOutput
        correct_index = orig_question['correctIndex']
        # A list of correct indices means multiple answers may be selected.
        multiple_selections = not isinstance(correct_index, int)
        if multiple_selections:
            # Correct choices split the full score; wrong picks penalize.
            partial = 1.0 / len(correct_index)
            choices = [{
                'text': text,
                'score': partial if i in correct_index else -1.0
            } for i, text in enumerate(orig_question['choices'])]
        else:
            choices = [{
                'text': text,
                'score': 1.0 if i == correct_index else 0.0
            } for i, text in enumerate(orig_question['choices'])]
        return {
            'version': QuestionDAO.VERSION,
            'description': description,
            'question': orig_question.get('questionHTML') or '',
            'multiple_selections': multiple_selections,
            'choices': choices}

    @classmethod
    def build_short_answer_question_dict(cls, question_html, matcher, response):
        """Assembles the dict for a short-answer question with one grader."""
        return {
            'version': QuestionDAO.VERSION,
            'question': question_html or '',
            'graders': [{
                'score': 1.0,
                'matcher': matcher,
                'response': response,
            }]
        }

    @classmethod
    def build_multiple_choice_question_dict(cls, question):
        """Assemble the dict for a multiple choice question."""
        question_dict = {
            'version': QuestionDAO.VERSION,
            'question': question.get('questionHTML') or '',
            'multiple_selections': False
        }
        choices = []
        for choice in question.get('choices'):
            if isinstance(choice, basestring):
                # Bare strings are distractors; wrapped values are correct.
                text = choice
                score = 0.0
            else:
                text = choice.value
                score = 1.0
            choices.append({
                'text': text,
                'score': score
            })
        question_dict['choices'] = choices
        return question_dict

    @classmethod
    def import_assessment_question(cls, question):
        """Converts a single legacy assessment question into a QuestionDTO.

        Raises:
            ValueError: if the question matches no known legacy shape.
        """
        if 'questionHTML' in question:
            question['questionHTML'] = question['questionHTML'].decode(
                'string-escape')
        # Convert a single question into a QuestionDTO.
        if 'choices' in question:
            q_dict = cls.build_multiple_choice_question_dict(
                question)
            question_type = QuestionDTO.MULTIPLE_CHOICE
        elif 'correctAnswerNumeric' in question:
            q_dict = cls.build_short_answer_question_dict(
                question.get('questionHTML'),
                'numeric',
                question.get('correctAnswerNumeric'))
            question_type = QuestionDTO.SHORT_ANSWER
        elif 'correctAnswerString' in question:
            q_dict = cls.build_short_answer_question_dict(
                question.get('questionHTML'),
                'case_insensitive',
                question.get('correctAnswerString'))
            question_type = QuestionDTO.SHORT_ANSWER
        elif 'correctAnswerRegex' in question:
            q_dict = cls.build_short_answer_question_dict(
                question.get('questionHTML'),
                'regex',
                question.get('correctAnswerRegex').value)
            question_type = QuestionDTO.SHORT_ANSWER
        else:
            raise ValueError('Unknown question type')
        question_dto = QuestionDTO(None, q_dict)
        question_dto.type = question_type
        return question_dto

    @classmethod
    def build_question_dtos(cls, assessment_dict, template, unit, errors):
        """Convert the assessment into a list of QuestionDTO's."""
        descriptions = QuestionDAO.get_questions_descriptions()
        question_dtos = []
        try:
            for i, q in enumerate(assessment_dict['questionsList']):
                description = template % (unit.title, (i + 1))
                if description in descriptions:
                    raise CollisionError(
                        'Non-unique question description: %s' % description)
                question_dto = cls.import_assessment_question(q)
                question_dto.dict['description'] = description
                question_dtos.append(question_dto)
        except CollisionError:
            # 'description' still holds the offending value from the loop.
            errors.append(
                'This assessment has already been imported. Remove '
                'duplicate questions from the question bank in '
                'order to re-import: %s.' % description)
            return None
        except Exception as ex:
            errors.append('Unable to convert: %s' % ex)
            return None
        return question_dtos
class QuestionGroupEntity(BaseEntity):
    """An object representing a question group in the datastore."""

    # Single JSON blob holding the entire group definition.
    data = db.TextProperty(indexed=False)
class QuestionGroupDTO(object):
    """Data transfer object for question groups."""

    def __init__(self, the_id, the_dict):
        self.id = the_id
        self.dict = the_dict

    @property
    def description(self):
        """Unique human-readable description; '' when unset."""
        stored = self.dict.get('description')
        return stored if stored else ''

    @property
    def introduction(self):
        """Introductory HTML shown before the group; '' when unset."""
        stored = self.dict.get('introduction')
        return stored if stored else ''

    @property
    def question_ids(self):
        """Ids of member questions, in group order."""
        return [entry['question'] for entry in self.dict.get('items', [])]

    @property
    def items(self):
        """A defensive deep copy of the raw item dicts."""
        return copy.deepcopy(self.dict.get('items', []))

    def add_question(self, question_id, weight):
        """Appends a question with the given weight; 'items' must exist."""
        self.dict['items'].append({'question': question_id, 'weight': weight})

    @property
    def last_modified(self):
        """Timestamp of last save; '' when never stamped."""
        stored = self.dict.get('last_modified')
        return stored if stored else ''

    @last_modified.setter
    def last_modified(self, value):
        self.dict['last_modified'] = value
class QuestionGroupDAO(LastModifiedJsonDao):
    """DAO for question groups stored as JSON blobs."""

    DTO = QuestionGroupDTO
    ENTITY = QuestionGroupEntity
    ENTITY_KEY_TYPE = BaseJsonDao.EntityKeyTypeId
    POST_LOAD_HOOKS = []
    POST_SAVE_HOOKS = []

    @classmethod
    def get_question_groups_descriptions(cls):
        """Returns the set of descriptions across all stored groups."""
        return {group.description for group in cls.get_all()}

    @classmethod
    def create_question_group(cls, question_group_dict):
        """Wraps the dict in a DTO and persists it; returns the new id."""
        return cls.save(QuestionGroupDTO(None, question_group_dict))

    @classmethod
    def validate_unique_description(cls, description):
        """Raises CollisionError when the description is already taken."""
        if description in cls.get_question_groups_descriptions():
            raise CollisionError(
                'Non-unique question group description: %s' % description)
class LabelEntity(BaseEntity):
    """A class representing labels that can be applied to Student, Unit, etc."""
    data = db.TextProperty(indexed=False)

    # Key under which the full label list is cached; invalidated on any
    # put()/delete() below.
    MEMCACHE_KEY = 'labels'

    # Empty: no properties are excluded from export (semantics defined on
    # BaseEntity).
    _PROPERTY_EXPORT_BLACKLIST = []

    def put(self):
        """Save the content to the datastore.

        To support caching the list of all labels, we must invalidate
        the cache on any change to any label.

        Returns:
            Value of entity as modified by put() (i.e., key setting)
        """
        result = super(LabelEntity, self).put()
        MemcacheManager.delete(self.MEMCACHE_KEY)
        return result

    def delete(self):
        """Remove a label from the datastore.

        To support caching the list of all labels, we must invalidate
        the cache on any change to any label.
        """
        super(LabelEntity, self).delete()
        MemcacheManager.delete(self.MEMCACHE_KEY)
class LabelDTO(object):
    """DTO for a label; also defines the taxonomy of label types."""

    LABEL_TYPE_GENERAL = 0
    LABEL_TYPE_COURSE_TRACK = 1
    LABEL_TYPE_LOCALE = 2

    # (type code, internal name, UI title, position in menus)
    LabelType = collections.namedtuple(
        'LabelType', ['type', 'name', 'title', 'menu_order'])

    # Types an admin may assign by hand.
    USER_EDITABLE_LABEL_TYPES = [
        LabelType(LABEL_TYPE_GENERAL, 'general', 'General', 0),
        LabelType(LABEL_TYPE_COURSE_TRACK, 'course_track', 'Course Track', 1),
    ]
    # Types managed by the system itself.
    SYSTEM_EDITABLE_LABEL_TYPES = [
        LabelType(LABEL_TYPE_LOCALE, 'locale', 'Language', 2),
    ]
    LABEL_TYPES = USER_EDITABLE_LABEL_TYPES + SYSTEM_EDITABLE_LABEL_TYPES

    def __init__(self, the_id, the_dict):
        self.id = the_id
        self.dict = the_dict

    @property
    def title(self):
        """Display title; '' when unset."""
        return self.dict.get('title', '')

    @property
    def description(self):
        """Free-form description; '' when unset."""
        return self.dict.get('description', '')

    @property
    def type(self):
        """Label type code; defaults to LABEL_TYPE_GENERAL when unset."""
        return self.dict.get('type', self.LABEL_TYPE_GENERAL)
class LabelManager(caching.RequestScopedSingleton):
    """Class that manages optimized loading of I18N data from datastore."""

    def __init__(self):
        # Lazily-built map of label id -> LabelDTO; None until first use.
        self._key_to_label = None

    def _preload(self):
        """Fetches every label from the datastore in one pass."""
        self._key_to_label = {
            row.id: row for row in LabelDAO.get_all_iter()}

    def _get_all(self):
        if self._key_to_label is None:
            self._preload()
        return self._key_to_label.values()

    @classmethod
    def get_all(cls):
        """Returns all labels, cached for the duration of the request."""
        return cls.instance()._get_all()
class LabelDAO(BaseJsonDao):
    """DAO for labels; list reads are request-cached via LabelManager."""

    DTO = LabelDTO
    ENTITY = LabelEntity
    ENTITY_KEY_TYPE = BaseJsonDao.EntityKeyTypeId

    @classmethod
    def get_all(cls):
        """Returns all labels sorted by (type menu order, title)."""
        items = LabelManager.get_all()
        order = {lt.type: lt.menu_order for lt in LabelDTO.LABEL_TYPES}
        return sorted(items, key=lambda l: (order[l.type], l.title))

    @classmethod
    def get_all_of_type(cls, label_type):
        """Returns all labels whose type code equals label_type."""
        return [label for label in cls.get_all()
                if label.type == label_type]

    @classmethod
    def get_set_of_ids_of_type(cls, label_type):
        """Returns the set of label ids having the given type."""
        return set([label.id for label in cls.get_all_of_type(label_type)])

    @classmethod
    def _apply_locale_labels_to_locale(cls, locale, items):
        """Filter out items not matching locale labels and current locale."""
        if locale:
            id_to_label = {}
            for label in LabelDAO.get_all_of_type(
                LabelDTO.LABEL_TYPE_LOCALE):
                id_to_label[int(label.id)] = label
            # Iterate over a copy so removing from 'items' is safe.
            for item in list(items):
                item_matches = set([int(label_id) for label_id in
                                    common_utils.text_to_list(item.labels)
                                    if int(label_id) in id_to_label.keys()])
                found = False
                for item_match in item_matches:
                    label = id_to_label[item_match]
                    if id_to_label and label and label.title == locale:
                        found = True
                # Items carrying no locale labels at all are kept.
                if id_to_label and item_matches and not found:
                    items.remove(item)
        return items

    @classmethod
    def apply_course_track_labels_to_student_labels(
        cls, course, student, items):
        """Filters items by course-track labels, then by locale policy."""
        # Batch the many label reads below against a read-only cache view.
        MemcacheManager.begin_readonly()
        try:
            items = cls._apply_labels_to_student_labels(
                LabelDTO.LABEL_TYPE_COURSE_TRACK, student, items)
            if course.get_course_setting('can_student_change_locale'):
                # Students pick their own locale: filter by current locale.
                return cls._apply_locale_labels_to_locale(
                    course.app_context.get_current_locale(), items)
            else:
                # Locale is label-driven: filter by the student's labels.
                return cls._apply_labels_to_student_labels(
                    LabelDTO.LABEL_TYPE_LOCALE, student, items)
        finally:
            MemcacheManager.end_readonly()

    @classmethod
    def _apply_labels_to_student_labels(cls, label_type, student, items):
        """Filter out items whose labels don't match those on the student.

        If the student has no labels, all items are taken.
        Similarly, if a item has no labels, it is included.

        Args:
            label_type: a label types to consider.
            student: the logged-in Student matching the user for this request.
            items: a list of item instances, each having 'labels' attribute.

        Returns:
            A list of item instances whose labels match those on the student.
        """
        label_ids = LabelDAO.get_set_of_ids_of_type(label_type)
        if student and not student.is_transient:
            student_matches = student.get_labels_of_type(label_type)
            # Iterate over a copy so removing from 'items' is safe.
            for item in list(items):
                item_matches = set([int(label_id) for label_id in
                                    common_utils.text_to_list(item.labels)
                                    if int(label_id) in label_ids])
                if (student_matches and item_matches and
                    student_matches.isdisjoint(item_matches)):
                    items.remove(item)
        return items
class StudentPreferencesEntity(BaseEntity):
    """A class representing an individual's preferences for a course.

    Note that here, we are using "Student" in the broadest sense possible:
    some human associated with a course. This basically means that we want to
    support preferences that are relevant to a student's view of a course, as
    well as a course administrator's preferences. These will be saved in the
    same object but will be edited in different editors, appropriate to the
    scope of the particular field in the DTO. For example, show_hooks and
    show_jinja_context are edited in the Dashboard, in
    modules/dashboard/admin_preferences_editor.py
    while locale is set by an Ajax widget in base.html.

    Note that this type is indexed by "name" -- the key is the same as
    that of the user.get_current_user().user_id(), which is a string.

    This type is course-specific, so it must be accessed within a namespaced
    context.
    """
    # JSON blob of preference values; see StudentPreferencesDTO for fields.
    data = db.TextProperty(indexed=False)

    @classmethod
    def safe_key(cls, db_key, transform_fn):
        """Returns a copy of db_key with its key_name (user_id) obfuscated."""
        return db.Key.from_path(cls.kind(), transform_fn(db_key.name()))
class StudentPreferencesDTO(object):
    """Per-user, per-course preferences backed by a JSON dict."""

    def __init__(self, the_id, the_dict):
        self.id = the_id
        self.dict = the_dict

    @property
    def show_hooks(self):
        """Show controls to permit editing of HTML inclusions (hook points).

        On course pages, there are various locations (hook points) at which
        HTML content is inserted. Turn this setting on to see those locations
        with controls that permit an admin to edit that HTML, and off to see
        the content as a student would.

        Returns:
            True when admin wants to see edit controls, False when he doesn't.
        """
        return self.dict.get('show_hooks', True)

    @show_hooks.setter
    def show_hooks(self, on):
        self.dict['show_hooks'] = on

    @property
    def show_jinja_context(self):
        """Do/don't show dump of Jinja context on bottom of pages."""
        return self.dict.get('show_jinja_context', False)

    @show_jinja_context.setter
    def show_jinja_context(self, on):
        self.dict['show_jinja_context'] = on

    @property
    def locale(self):
        """Preferred locale code, or None when unset."""
        return self.dict.get('locale')

    @locale.setter
    def locale(self, code):
        self.dict['locale'] = code

    @property
    def last_location(self):
        """Last in-course location recorded, or None when unset."""
        return self.dict.get('last_location')

    @last_location.setter
    def last_location(self, where):
        self.dict['last_location'] = where
class StudentPreferencesDAO(BaseJsonDao):
  """DAO for StudentPreferencesEntity; entities are keyed by user_id."""

  DTO = StudentPreferencesDTO
  ENTITY = StudentPreferencesEntity
  ENTITY_KEY_TYPE = BaseJsonDao.EntityKeyTypeName
  CURRENT_VERSION = '1.0'

  @classmethod
  def load_or_default(cls):
    """Load the signed-in user's preferences, or build unsaved defaults.

    Returns:
      A StudentPreferencesDTO for the current user, or None when no user
      is signed in.  The default DTO is not persisted by this call.
    """
    user = users.get_current_user()
    if not user:
      return None
    user_id = user.user_id()
    prefs = cls.load(user_id)
    if not prefs:
      defaults = {
          'version': cls.CURRENT_VERSION,
          'show_hooks': False,
          'show_jinja_context': False,
      }
      prefs = StudentPreferencesDTO(user_id, defaults)
    return prefs
class RoleEntity(BaseEntity):
  """Datastore entity for one role; payload is serialized into 'data'.

  The serialized dict is interpreted by RoleDTO (name, description,
  users, permissions).
  """

  # Opaque serialized role definition; managed by RoleDAO.
  data = db.TextProperty(indexed=False)
class RoleDTO(object):
  """Read-only view over a role's backing dict."""

  def __init__(self, the_id, the_dict):
    self.id = the_id
    self.dict = the_dict

  def _field(self, key, default):
    # All properties funnel through here so defaults live in one place.
    return self.dict.get(key, default)

  @property
  def name(self):
    """Role name; empty string when unset."""
    return self._field('name', '')

  @property
  def description(self):
    """Role description; empty string when unset."""
    return self._field('description', '')

  @property
  def users(self):
    """Value of the 'users' entry; defaults to []."""
    return self._field('users', [])

  @property
  def permissions(self):
    """Value of the 'permissions' entry; defaults to {}."""
    return self._field('permissions', {})
class RoleDAO(BaseJsonDao):
  """DAO for RoleEntity; entities are addressed by numeric datastore ID."""

  DTO = RoleDTO
  ENTITY = RoleEntity
  ENTITY_KEY_TYPE = BaseJsonDao.EntityKeyTypeId
def get_global_handlers():
  """Return the (URL, handler) routes this module exposes app-wide."""
  routes = [
      (StudentLifecycleObserver.URL, StudentLifecycleObserver),
  ]
  return routes
def register_for_data_removal():
  """Register removers so per-user data can be deleted on request."""
  # The profile spans courses, so it goes in the sitewide registry.
  data_removal.Registry.register_sitewide_indexed_by_user_id_remover(
      StudentProfileDAO.delete_profile_by_user_id)
  # These entities can each be removed via a user-ID-derived key or prefix.
  for remover in (Student.delete_by_user_id,
                  StudentAnswersEntity.delete_by_key,
                  StudentPropertyEntity.delete_by_user_id_prefix,
                  StudentPreferencesEntity.delete_by_key):
    data_removal.Registry.register_indexed_by_user_id_remover(remover)
  # EventEntity goes through the unindexed-entity removal path.
  data_removal.Registry.register_unindexed_entity_class(EventEntity)
| false | true |
f71efea9b7b1591329208d4b70a055688fc34558 | 4,175 | py | Python | wrapper.py | crsqq/OpenNE | 0cecb2b5076b878d2f07ed1130682aeab6ce37f1 | [
"MIT"
] | null | null | null | wrapper.py | crsqq/OpenNE | 0cecb2b5076b878d2f07ed1130682aeab6ce37f1 | [
"MIT"
] | null | null | null | wrapper.py | crsqq/OpenNE | 0cecb2b5076b878d2f07ed1130682aeab6ce37f1 | [
"MIT"
] | null | null | null | from OpenNE.src.libnrl import graph
from OpenNE.src.libnrl import grarep
from OpenNE.src.libnrl import line
from OpenNE.src.libnrl import node2vec
from OpenNE.src.libnrl.gcn import gcnAPI
from itertools import product
import networkx as nx
import numpy as np
import tensorflow as tf
def nx_to_openne_graph(nxgraph, stringify_nodes=True):
    """Convert a networkx graph into an OpenNE ``graph.Graph``.

    Args:
        nxgraph: source networkx graph; a directed copy is made, so the
            original is not modified.
        stringify_nodes: when True, relabel every node to its str() form.

    Returns:
        An OpenNE graph whose directed edges all carry weight 1.0.
    """
    directed = nx.to_directed(nxgraph).copy()
    if stringify_nodes:
        mapping = {node: str(node) for node in directed.nodes}
        nx.relabel_nodes(directed, mapping, copy=False)
    nx.set_edge_attributes(directed, 1.0, 'weight')
    wrapped = graph.Graph()
    wrapped.G = directed
    wrapped.encode_node()
    return wrapped
class OpenNEEmbeddingBase:
    """Common plumbing for lazily computed OpenNE embeddings.

    Subclasses implement run(), which must assign self.embeddings.
    Embeddings are computed on first access and cached until the
    parameters change.
    """

    def __init__(self, thisgraph, parameters):
        self.graph = nx_to_openne_graph(thisgraph)
        self.embeddings = None  # None = not computed yet (lazy sentinel)
        self.parameters = parameters

    def run(self):
        """Compute self.embeddings; must be overridden by subclasses."""
        raise NotImplementedError('')

    def update_parameters(self, new_parameters):
        """Replace parameters and invalidate any cached embeddings."""
        self.parameters = new_parameters
        self.embeddings = None

    def get_embeddings(self):
        # Compare against the None sentinel rather than truthiness: a run()
        # that legitimately produced an empty (falsy) result would otherwise
        # be re-executed on every access.
        if self.embeddings is None:
            self.run()
        return self.embeddings

    def get_vectors(self):
        return self.get_embeddings().vectors

    @staticmethod
    def valid_parameter_combinations(parameterSpace):
        """Return every combination of the given parameter space.

        Args:
            parameterSpace: dict mapping parameter name -> list of values.

        Returns:
            A list of dicts, one per element of the cartesian product.
            Subclasses with constrained parameters override this to filter.
        """
        all_combinations = product(*parameterSpace.values())
        return [{k: v for k, v in zip(parameterSpace.keys(), combn)}
                for combn in all_combinations]
class Node2VecEmbedding(OpenNEEmbeddingBase):
    """node2vec embedding; supports incremental retraining.

    Example parameters:
        {'dim': 2, 'num_paths': 80, 'p': 1, 'path_length': 10, 'q': 1}
    """

    def run(self):
        self.embeddings = node2vec.Node2vec(
            self.graph, retrainable=True, **self.parameters)

    def retrain(self, new_graph, num_paths=80, epochs=5):
        """Continue training the existing model on new_graph."""
        converted = nx_to_openne_graph(new_graph)
        self.embeddings.retrain(converted, num_paths=num_paths, epochs=epochs)
class GraRepEmbedding(OpenNEEmbeddingBase):
    """GraRep embedding; 'dim' must be divisible by 'Kstep'."""

    def run(self):
        self.embeddings = grarep.GraRep(self.graph, **self.parameters)

    @staticmethod
    def valid_parameter_combinations(parameterSpace):
        """Like the base class, but keep only combos where Kstep divides dim."""
        keys = list(parameterSpace.keys())
        combos = [dict(zip(keys, values))
                  for values in product(*parameterSpace.values())]
        return [c for c in combos if c['dim'] % c['Kstep'] == 0]
class LINEEmbedding(OpenNEEmbeddingBase):
    """Embedding computed with OpenNE's LINE implementation (TensorFlow)."""

    def run(self):
        # Reset TF's default graph first -- presumably so repeated runs in
        # one process don't accumulate ops from earlier models (TODO:
        # confirm LINE builds into the default graph).
        tf.reset_default_graph()
        self.embeddings = line.LINE(self.graph, **self.parameters)
from scipy.sparse.linalg.eigen.arpack import eigsh as largest_eigsh
class SpectralClusteringEmbedding(OpenNEEmbeddingBase):
    """Embed nodes using eigenvectors of the normalized graph Laplacian.

    Unlike the base class, the networkx graph is kept as-is (no OpenNE
    conversion); run() and get_vectors() work on it directly.
    """

    def __init__(self, thisgraph, parameters):
        # Deliberately does not call super().__init__: we need the raw
        # networkx graph, not an OpenNE wrapper.
        self.graph = thisgraph
        self.embeddings = None
        self.parameters = parameters
        # NOTE(review): copy=False relabels the caller's graph in place;
        # callers must not rely on the original node labels afterwards.
        nx.relabel_nodes(self.graph, {n: str(n) for n in self.graph.nodes},
                         copy=False)

    def run(self):
        laplacian = nx.normalized_laplacian_matrix(self.graph)
        # Request 'dim' eigenpairs; the eigenvalues are not needed for the
        # embedding itself.  (The original code bound them to throwaway
        # names a, b via a chained assignment; cleaned up here.)
        # NOTE(review): eigsh defaults to largest-magnitude eigenpairs,
        # while classical spectral embeddings use the smallest -- confirm
        # this choice is intentional.
        _evalues, evectors = largest_eigsh(laplacian, k=self.parameters['dim'])
        self.embeddings = {
            str(n): v for n, v in zip(self.graph.nodes, evectors)}

    def get_vectors(self):
        # The embeddings dict itself is the node -> vector mapping.
        return self.get_embeddings()
def _RandNE(graph, dim, q, beta):
    """Iterative random-projection embedding (RandNE).

    Computes U = sum_{i=0..q} beta**i * (A**i @ U0), where A is the
    adjacency matrix of *graph* and U0 is the Q factor of a Gaussian
    random matrix with scale 1/dim.

    Returns:
        An (n_nodes, dim) numpy array of node embeddings.
    """
    adjacency = nx.to_scipy_sparse_matrix(graph)
    random_matrix = np.random.normal(
        loc=0, scale=1 / dim, size=(adjacency.shape[0], dim))
    base, _ = np.linalg.qr(random_matrix)
    projections = [base]
    for _ in range(q):
        projections.append(adjacency.dot(projections[-1]))
    projections = np.array(projections)
    weights = beta ** np.arange(0, q + 1)
    weighted = np.array([w * m for w, m in zip(weights, projections)])
    return weighted.sum(axis=0)
class RandNEEmbedding(OpenNEEmbeddingBase):
    """RandNE embedding over a plain networkx graph (no OpenNE conversion)."""

    def __init__(self, thisgraph, parameters):
        # Skip the base constructor on purpose: _RandNE consumes the raw
        # networkx graph directly.
        self.graph = thisgraph
        self.embeddings = None
        self.parameters = parameters

    def run(self):
        matrix = _RandNE(self.graph, **self.parameters)
        self.embeddings = {
            str(node): row for node, row in zip(self.graph.nodes, matrix)}

    def get_vectors(self):
        # The embeddings dict itself is the node -> vector mapping.
        return self.get_embeddings()
| 33.134921 | 109 | 0.668982 | from OpenNE.src.libnrl import graph
from OpenNE.src.libnrl import grarep
from OpenNE.src.libnrl import line
from OpenNE.src.libnrl import node2vec
from OpenNE.src.libnrl.gcn import gcnAPI
from itertools import product
import networkx as nx
import numpy as np
import tensorflow as tf
def nx_to_openne_graph(nxgraph, stringify_nodes=True):
dg = nx.to_directed(nxgraph).copy()
if stringify_nodes:
nx.relabel_nodes(dg, {n:str(n) for n in dg.nodes}, copy=False)
nx.set_edge_attributes(dg, 1.0, 'weight')
g = graph.Graph()
g.G = dg
g.encode_node()
return g
class OpenNEEmbeddingBase:
def __init__(self, thisgraph, parameters):
self.graph = nx_to_openne_graph(thisgraph)
self.embeddings = None
self.parameters = parameters
def run(self):
raise NotImplementedError('')
def update_parameters(self, new_parameters):
self.parameters = new_parameters
self.embeddings = None
def get_embeddings(self):
if not self.embeddings:
self.run()
return self.embeddings
def get_vectors(self):
return self.get_embeddings().vectors
@staticmethod
def valid_parameter_combinations(parameterSpace):
all_combinations = product(*parameterSpace.values())
return [{k:v for k,v in zip(parameterSpace.keys(), combn)} for combn in all_combinations]
class Node2VecEmbedding(OpenNEEmbeddingBase):
def run(self):
self.embeddings = node2vec.Node2vec(self.graph, retrainable=True, **self.parameters)
def retrain(self, new_graph, num_paths=80, epochs=5):
g = nx_to_openne_graph(new_graph)
self.embeddings.retrain(g, num_paths=num_paths, epochs=epochs)
class GraRepEmbedding(OpenNEEmbeddingBase):
def run(self):
self.embeddings = grarep.GraRep(self.graph, **self.parameters)
@staticmethod
def valid_parameter_combinations(parameterSpace):
all_combinations = product(*parameterSpace.values())
all_combinations = [{k:v for k,v in zip(parameterSpace.keys(), combn)} for combn in all_combinations]
return [x for x in all_combinations if x["dim"] % x["Kstep"] == 0]
class LINEEmbedding(OpenNEEmbeddingBase):
def run(self):
tf.reset_default_graph()
self.embeddings = line.LINE(self.graph, **self.parameters)
from scipy.sparse.linalg.eigen.arpack import eigsh as largest_eigsh
class SpectralClusteringEmbedding(OpenNEEmbeddingBase):
def __init__(self, thisgraph, parameters):
self.graph = thisgraph
self.embeddings = None
self.parameters = parameters
nx.relabel_nodes(self.graph, {n:str(n) for n in self.graph.nodes}, copy=False)
def run(self):
L = nx.normalized_laplacian_matrix(self.graph)
evalues, evectors = a,b = largest_eigsh(L, k=self.parameters['dim'])
self.embeddings = {str(n):v for n,v in zip(self.graph.nodes, evectors)}
def get_vectors(self):
return self.get_embeddings()
def _RandNE(graph, dim, q, beta):
d = dim
A = nx.to_scipy_sparse_matrix(graph)
R = np.random.normal(loc=0, scale=1/d, size=(A.shape[0], d))
U0, _ = np.linalg.qr(R)
Ulist = [U0]
for i in range(q):
Ulist.append(A.dot(Ulist[-1]))
Ulist = np.array(Ulist)
betas = (beta**np.arange(0, q+1))
U = np.array([scalar*m for scalar,m in zip(betas, Ulist)]).sum(axis=0)
return U
class RandNEEmbedding(OpenNEEmbeddingBase):
def __init__(self, thisgraph, parameters):
self.graph = thisgraph
self.embeddings = None
self.parameters = parameters
def run(self):
U = _RandNE(self.graph, **self.parameters)
self.embeddings = {str(n):v for n,v in zip(self.graph.nodes, U)}
def get_vectors(self):
return self.get_embeddings()
| true | true |
f71eff5ae0a308b7cbc26231bed0d1f82d0ac6ae | 4,630 | py | Python | examples/dfp/v201802/native_style_service/create_native_styles.py | christineyi3898/googleads-python-lib | cd707dc897b93cf1bbb19355f7424e7834e7fb55 | [
"Apache-2.0"
] | 1 | 2019-10-21T04:10:22.000Z | 2019-10-21T04:10:22.000Z | examples/dfp/v201802/native_style_service/create_native_styles.py | christineyi3898/googleads-python-lib | cd707dc897b93cf1bbb19355f7424e7834e7fb55 | [
"Apache-2.0"
] | null | null | null | examples/dfp/v201802/native_style_service/create_native_styles.py | christineyi3898/googleads-python-lib | cd707dc897b93cf1bbb19355f7424e7834e7fb55 | [
"Apache-2.0"
] | 1 | 2019-10-21T04:10:51.000Z | 2019-10-21T04:10:51.000Z | #!/usr/bin/env python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a native app install ad."""
import uuid
# Import appropriate modules from the client library.
from googleads import dfp
# This is the creative template ID for the system-defined native app install ad
# format, which we will create the native style from. Use
# CreativeTemplateService.getCreativeTemplateByStatement() and
# CreativeTemplate.isNativeEligible() to get other native ad formats available
# in your network.
CREATIVE_TEMPLATE_ID = 10004400

# Pixel dimensions of the native style (main() sets isAspectRatio=False,
# so these are an exact size, not a ratio).
WIDTH = 300
HEIGHT = 345
# HTML for the native style.  The [%...%] tokens are creative template
# placeholders; %%CLICK_URL_UNESC%% / %%DEST_URL%% are presumably
# ad-server click macros -- confirm against the DFP macro reference.
HTML_SNIPPET = """<div id="adunit" style="overflow: hidden;">
<img src="[%Thirdpartyimpressiontracker%]" style="display:none">
<div class="attribution">Ad</div>
<div class="image">
<a class="image-link"
href="%%CLICK_URL_UNESC%%[%Thirdpartyclicktracker%]%%DEST_URL%%"
target="_top">
<img src="[%Image%]">
</a>
</div>
<div class="app-icon"><img src="[%Appicon%]"/></div>
<div class="title">
<a class="title-link"
href="%%CLICK_URL_UNESC%%[%Thirdpartyclicktracker%]%%DEST_URL%%"
target="_top">[%Headline%]</a>
</div>
<div class="reviews"></div>
<div class="body">
<a class="body-link"
href="%%CLICK_URL_UNESC%%[%Thirdpartyclicktracker%]%%DEST_URL%%"
target="_top">[%Body%]</a>
</div>
<div class="price">[%Price%]</div>
<div class="button">
<a class="button-link"
href="%%CLICK_URL_UNESC%%[%Thirdpartyclicktracker%]%%DEST_URL%%"
target="_top">[%Calltoaction%]</a>
</div>
</div>
"""
# CSS that styles the HTML_SNIPPET markup above (one rule per class used
# there; .price and .button svg are intentionally hidden).
CSS_SNIPPET = """body {
background-color: rgba(255, 255, 255, 1);
font-family: "Roboto-Regular", sans-serif;
font-weight: normal;
font-size: 12px;
line-height: 14px;
}
.attribution {
background-color: rgba(236, 182, 0, 1);
color: rgba(255, 255, 255, 1);
font-size: 13px;
display: table;
margin: 4px 8px;
padding: 0 3px;
border-radius: 2px;
}
.image {
text-align: center;
margin: 8px;
}
.image img,
.image-link {
width: 100%;
}
.app-icon {
float: left;
margin: 0 8px 4px 8px;
height: 40px;
width: 40px;
background-color: transparent;
}
.app-icon img {
height: 100%;
width: 100%;
border-radius: 20%;
}
.title,
.promo-headline {
font-weight: bold;
font-size: 14px;
line-height: 20px;
margin: 8px 8px 4px 8px;
}
.title a,
.promo-headline {
color: rgba(112, 112, 112, 1);
text-decoration: none;
}
.reviews {
float: left;
}
.reviews svg {
fill: rgba(0, 0, 0, 0.7);
}
.body {
clear: left;
margin: 8px;
}
.body a {
color: rgba(110, 110, 110, 1);
text-decoration: none;
}
.price {
display: none;
}
.button {
font-size: 14px;
font-weight: bold;
float: right;
margin: 0px 16px 16px 0px;
white-space: nowrap;
}
.button a {
color: #2196F3;
text-decoration: none;
}
.button svg {
display: none;
}
"""
def main(client, html_snippet, css_snippet, creative_template_id, width,
         height):
  """Create one native style via the DFP NativeStyleService and report it.

  Args:
    client: an authenticated dfp.DfpClient.
    html_snippet: HTML for the style, with creative-template placeholders.
    css_snippet: CSS applied to the HTML.
    creative_template_id: ID of the native creative template to style.
    width: style width in pixels.
    height: style height in pixels.
  """
  # Initialize appropriate service.
  native_style_service = client.GetService('NativeStyleService',
                                           version='v201802')

  # Assemble the request payload; isAspectRatio=False makes width/height
  # an exact pixel size rather than a ratio.
  style = {
      'name': 'Native style #%d' % uuid.uuid4(),
      'htmlSnippet': html_snippet,
      'cssSnippet': css_snippet,
      'creativeTemplateId': creative_template_id,
      'size': {
          'width': width,
          'height': height,
          'isAspectRatio': False,
      },
  }

  # Create the native style on the server and display the results.
  for created in native_style_service.createNativeStyles([style]):
    print ('A Native style with ID "%s", name "%s", and creative template ID'
           '"%d" was created.' % (created['id'], created['name'],
                                  created['creativeTemplateId']))
if __name__ == '__main__':
  # Initialize client object.  LoadFromStorage reads credentials and
  # network settings from the googleads storage file (googleads.yaml by
  # default -- confirm the path for your setup).
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client, HTML_SNIPPET, CSS_SNIPPET, CREATIVE_TEMPLATE_ID, WIDTH,
       HEIGHT)
| 25.58011 | 79 | 0.641685 |
import uuid
from googleads import dfp
CREATIVE_TEMPLATE_ID = 10004400
WIDTH = 300
HEIGHT = 345
HTML_SNIPPET = """<div id="adunit" style="overflow: hidden;">
<img src="[%Thirdpartyimpressiontracker%]" style="display:none">
<div class="attribution">Ad</div>
<div class="image">
<a class="image-link"
href="%%CLICK_URL_UNESC%%[%Thirdpartyclicktracker%]%%DEST_URL%%"
target="_top">
<img src="[%Image%]">
</a>
</div>
<div class="app-icon"><img src="[%Appicon%]"/></div>
<div class="title">
<a class="title-link"
href="%%CLICK_URL_UNESC%%[%Thirdpartyclicktracker%]%%DEST_URL%%"
target="_top">[%Headline%]</a>
</div>
<div class="reviews"></div>
<div class="body">
<a class="body-link"
href="%%CLICK_URL_UNESC%%[%Thirdpartyclicktracker%]%%DEST_URL%%"
target="_top">[%Body%]</a>
</div>
<div class="price">[%Price%]</div>
<div class="button">
<a class="button-link"
href="%%CLICK_URL_UNESC%%[%Thirdpartyclicktracker%]%%DEST_URL%%"
target="_top">[%Calltoaction%]</a>
</div>
</div>
"""
CSS_SNIPPET = """body {
background-color: rgba(255, 255, 255, 1);
font-family: "Roboto-Regular", sans-serif;
font-weight: normal;
font-size: 12px;
line-height: 14px;
}
.attribution {
background-color: rgba(236, 182, 0, 1);
color: rgba(255, 255, 255, 1);
font-size: 13px;
display: table;
margin: 4px 8px;
padding: 0 3px;
border-radius: 2px;
}
.image {
text-align: center;
margin: 8px;
}
.image img,
.image-link {
width: 100%;
}
.app-icon {
float: left;
margin: 0 8px 4px 8px;
height: 40px;
width: 40px;
background-color: transparent;
}
.app-icon img {
height: 100%;
width: 100%;
border-radius: 20%;
}
.title,
.promo-headline {
font-weight: bold;
font-size: 14px;
line-height: 20px;
margin: 8px 8px 4px 8px;
}
.title a,
.promo-headline {
color: rgba(112, 112, 112, 1);
text-decoration: none;
}
.reviews {
float: left;
}
.reviews svg {
fill: rgba(0, 0, 0, 0.7);
}
.body {
clear: left;
margin: 8px;
}
.body a {
color: rgba(110, 110, 110, 1);
text-decoration: none;
}
.price {
display: none;
}
.button {
font-size: 14px;
font-weight: bold;
float: right;
margin: 0px 16px 16px 0px;
white-space: nowrap;
}
.button a {
color: #2196F3;
text-decoration: none;
}
.button svg {
display: none;
}
"""
def main(client, html_snippet, css_snippet, creative_template_id, width,
height):
native_style_service = client.GetService('NativeStyleService',
version='v201802')
native_style = {
'name': 'Native style #%d' % uuid.uuid4(),
'htmlSnippet': html_snippet,
'cssSnippet': css_snippet,
'creativeTemplateId': creative_template_id,
'size': {
'width': width,
'height': height,
'isAspectRatio': False
}
}
native_styles = native_style_service.createNativeStyles([native_style])
for native_style in native_styles:
print ('A Native style with ID "%s", name "%s", and creative template ID'
'"%d" was created.' % (native_style['id'], native_style['name'],
native_style['creativeTemplateId']))
if __name__ == '__main__':
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, HTML_SNIPPET, CSS_SNIPPET, CREATIVE_TEMPLATE_ID, WIDTH,
HEIGHT)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.