index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
62,140 | yss-810/test | refs/heads/master | /dame/demo_baidu_search.py | #百度搜索关键字
import time

from selenium import webdriver

# Launch Chrome and maximize the window.
driver = webdriver.Chrome()
driver.maximize_window()

# Open the Baidu home page; implicitly wait up to 20s for elements to appear.
driver.get("http://www.baidu.com")
driver.implicitly_wait(20)

# Search for "python", then follow the Baidu-Baike result link.
driver.find_element_by_id('kw').send_keys("python")
driver.find_element_by_id('su').click()
time.sleep(3)
driver.find_element_by_partial_link_text('Python(计算机程序设计语言)_百度百科').click()
time.sleep(10)

# (earlier experiments: going back, searching again, clearing the box —
# kept disabled)

# Close the browser.
driver.quit() | {"/test_case/test_zhuce.py": ["/driver/browser.py", "/lib/utils.py", "/page/zhuce_page.py"], "/dame/test_suite_a.py": ["/dame/test_dame.py", "/dame/test_dame_baidu_sreach.py"], "/page/zhuce_page.py": ["/page/base_page.py"], "/test_case/test_login.py": ["/driver/browser.py", "/page/login_page.py"]} |
62,141 | yss-810/test | refs/heads/master | /dame/demo_jd_leimu.py | import time
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys

driver = webdriver.Chrome()
driver.maximize_window()
driver.get('https://www.jd.com')

# Hover over the "房产" category entry so its fly-out submenu opens.
category_link = driver.find_element(By.LINK_TEXT, '房产')
ActionChains(driver).move_to_element(category_link).perform()
time.sleep(5)
driver.find_element(By.LINK_TEXT, '别墅').click()
time.sleep(3)

# The click opened a new tab: switch to the most recently opened window.
driver.switch_to.window(driver.window_handles[-1])

# Type a query into the search box and submit it via the Enter key.
driver.find_element(By.XPATH, '//input[@id="key"]').send_keys('天宸原著')
driver.find_element(By.XPATH, '//input[@id="key"]').send_keys(Keys.ENTER)
time.sleep(5)

# Clear the box again with the keyboard: Ctrl+A, then Backspace.
driver.find_element(By.XPATH, '//input[@id="key"]').send_keys(Keys.CONTROL, "a")
driver.find_element(By.XPATH, '//input[@id="key"]').send_keys(Keys.BACK_SPACE)
time.sleep(5)
driver.quit() | {"/test_case/test_zhuce.py": ["/driver/browser.py", "/lib/utils.py", "/page/zhuce_page.py"], "/dame/test_suite_a.py": ["/dame/test_dame.py", "/dame/test_dame_baidu_sreach.py"], "/page/zhuce_page.py": ["/page/base_page.py"], "/test_case/test_login.py": ["/driver/browser.py", "/page/login_page.py"]} |
62,142 | yss-810/test | refs/heads/master | /run_all.py | import unittest
from HTMLTestRunner import HTMLTestRunner
import os

# Discover every test module matching test*.py under the test_case package.
discover = unittest.defaultTestLoader.discover(start_dir='test_case',
                                               pattern='test*.py',
                                               top_level_dir=None)
# BUG FIX: open() raises FileNotFoundError when the report directory does
# not exist yet, so create it (idempotently) before opening the file.
os.makedirs('report', exist_ok=True)
report_path = 'report/' + 'report.html'
with open(report_path, 'wb') as file:
    # The runner streams the HTML report into this file when run() executes.
    runner = HTMLTestRunner(
        stream=file,
        title='自动化测试报告-登录',
        description='报告详情'
    )
runner.run(discover) | {"/test_case/test_zhuce.py": ["/driver/browser.py", "/lib/utils.py", "/page/zhuce_page.py"], "/dame/test_suite_a.py": ["/dame/test_dame.py", "/dame/test_dame_baidu_sreach.py"], "/page/zhuce_page.py": ["/page/base_page.py"], "/test_case/test_login.py": ["/driver/browser.py", "/page/login_page.py"]} |
62,143 | yss-810/test | refs/heads/master | /dame/dame_taobao_cz.py | import time
from selenium import webdriver

driver = webdriver.Chrome()
driver.maximize_window()
driver.get('https://www.taobao.com/')
time.sleep(2)

# Phone-credit top-up flow: open the "充话费" page (opens a new tab).
driver.find_element_by_link_text('充话费').click()
time.sleep(2)
print('打印当前url',driver.current_url)

# Switch to the newly opened tab.
handles = driver.window_handles
driver.switch_to.window(handles[-1])
print('打印当前url',driver.current_url)

# The top-up form lives inside an iframe: enter it before locating fields.
# BUG FIX: was `driver.swicth_to.frame(...)` (typo), which raises
# AttributeError at runtime.
driver.switch_to.frame(driver.find_element_by_xpath('/html/body/iframe'))
driver.find_element_by_id('JCZ7').send_keys('15928561321')
time.sleep(2)
driver.find_element_by_xpath('//div[@id="cz"]/form/div[3]/div/ul/li[1]/span').click()
# Leave the iframe again before further page-level interaction.
driver.switch_to.parent_frame()
driver.quit() | {"/test_case/test_zhuce.py": ["/driver/browser.py", "/lib/utils.py", "/page/zhuce_page.py"], "/dame/test_suite_a.py": ["/dame/test_dame.py", "/dame/test_dame_baidu_sreach.py"], "/page/zhuce_page.py": ["/page/base_page.py"], "/test_case/test_login.py": ["/driver/browser.py", "/page/login_page.py"]} |
62,144 | yss-810/test | refs/heads/master | /page/login_page.py |
from selenium.webdriver.common.by import By
"""页面封装"""
class LoginPage():
    """Page object wrapping the ECShop user login page.

    Encapsulates the locators and low-level actions so test cases only deal
    with high-level steps (open / login / read result text).
    """
    def __init__(self,driver):
        # driver: an already-created selenium WebDriver shared with the test.
        self.driver=driver
        # Absolute XPath locators for the login form controls.
        self.locator_ele_username=(By.XPATH,('/html/body/div[5]/div[3]/div[1]/form/table/tbody/tr[1]/td[2]/input'))
        self.locator_ele_password=(By.XPATH,('/html/body/div[5]/div[3]/div[1]/form/table/tbody/tr[2]/td[2]/input'))
        self.locator_ele_submit=(By.XPATH,('/html/body/div[5]/div[3]/div[1]/form/table/tbody/tr[4]/td[2]/input[3]'))
        # Member-zone link shown after a login attempt; its text is used as
        # the assertion value ("rasser" looks like a typo for "result" —
        # name kept for caller compatibility).
        self.locator_ele_rasser=(By.XPATH, ('//font[@id="ECS_MEMBERZONE"]/a[1]'))
        # NOTE(review): hard-coded LAN address — presumably a test-bench host.
        self.url="http://192.168.4.231/upload/user.php"
    def ele_username(self,username):
        """Type *username* into the username input."""
        self.driver.find_element(*self.locator_ele_username).send_keys(username)
    def ele_password(self,password):
        """Type *password* into the password input."""
        self.driver.find_element(*self.locator_ele_password).send_keys(password)
    def ele_submit(self):
        """Click the login (submit) button."""
        self.driver.find_element(*self.locator_ele_submit).click()
    def ele_rasser(self):
        """Return the member-zone link text shown after a login attempt."""
        result = self.driver.find_element(*self.locator_ele_rasser).text
        return result
    def open(self):
        """Navigate the browser to the login page URL."""
        self.driver.get(self.url)
    def login(self,username,password):
        """Open the page, submit the credentials and return the result text."""
        self.open()
        self.ele_username(username)
        self.ele_password(password)
        self.ele_submit()
        # Captured member-zone text; returned to the caller for assertions.
        assert_reuslt=self.ele_rasser()
return assert_reuslt | {"/test_case/test_zhuce.py": ["/driver/browser.py", "/lib/utils.py", "/page/zhuce_page.py"], "/dame/test_suite_a.py": ["/dame/test_dame.py", "/dame/test_dame_baidu_sreach.py"], "/page/zhuce_page.py": ["/page/base_page.py"], "/test_case/test_login.py": ["/driver/browser.py", "/page/login_page.py"]} |
62,145 | yss-810/test | refs/heads/master | /dame/demo_es_denglu.py | import time
from selenium.webdriver.support.select import Select
from selenium import webdriver

driver = webdriver.Chrome()
driver.maximize_window()

# Open the shop front page; elements get up to 10 seconds to appear.
driver.get('http://192.168.4.223/upload/')
driver.implicitly_wait(10)
time.sleep(2)

# Bypass the captcha/login form by injecting a pre-captured session:
# these cookies encode an already-authenticated user (admin3, id 37).
for session_cookie in (
        {'name':'ECS[password]','value':'20c801b5e8531a40a6ecc4903af30708'},
        {'name':'ECS[user_id]','value':'37'},
        {'name':'ECS[username]','value':'admin3'},
        {'name':'ECS[visit_times]','value':'1'},
        {'name':'ECS_ID','value':'d02e377be002ee60415a61b54c0996d90a9c458a'},
):
    driver.add_cookie(session_cookie)

# Reload the page so the injected cookies take effect.
driver.get('http://192.168.4.223/upload/')

# (manual login / category browsing steps kept disabled)
# Next steps: open item page, pick size, set quantity, click buy-now.
time.sleep(5)
driver.quit() | {"/test_case/test_zhuce.py": ["/driver/browser.py", "/lib/utils.py", "/page/zhuce_page.py"], "/dame/test_suite_a.py": ["/dame/test_dame.py", "/dame/test_dame_baidu_sreach.py"], "/page/zhuce_page.py": ["/page/base_page.py"], "/test_case/test_login.py": ["/driver/browser.py", "/page/login_page.py"]} |
62,150 | silenove/ssd_vgg-Tensorflow | refs/heads/master | /utils/network_util.py | """The utils needed in SSD implementations."""
import numpy as np
import tensorflow as tf
def encode_gtbboxes_from_one_layer(ssd_params,
                                   anchors_cord,
                                   gt_labels,
                                   gt_bboxes,
                                   matching_threshold=0.5,
                                   dtype=tf.float32):
    """Encode gt labels and bboxes using SSD anchors from one layer.

    Arguments:
        ssd_params: SSD configuration (uses num_classes and prior_variance).
        anchors_cord: (y, x, h, w) default-anchor grids of this layer.
        gt_labels: 1D tensor(int64) containing gt labels.
        gt_bboxes: Nx4 tensor(float) with bboxes relative coordinates.
        matching_threshold: Threshold for positive match with gt bboxes.
        dtype: float dtype of score and localization outputs.
    Returns:
        anchor_labels: per-anchor matched class id (0 = background).
        anchor_scores: per-anchor best jaccard overlap found.
        target_loc: per-anchor (dy, dx, dh, dw) regression targets scaled
            by the prior variances.
    """
    y_ref, x_ref, h_ref, w_ref = anchors_cord
    # Anchor corner coordinates from the center/size grids.
    ymin = y_ref - h_ref / 2.0
    ymax = y_ref + h_ref / 2.0
    xmin = x_ref - w_ref / 2.0
    xmax = x_ref + w_ref / 2.0
    anchors = [ymin, xmin, ymax, xmax]
    # Per-anchor matching state, refined as each gt box is visited below.
    shape_ref = y_ref.shape
    anchor_labels = tf.zeros(shape_ref, dtype=tf.int64)
    anchor_scores = tf.zeros(shape_ref, dtype=dtype)
    target_ymin = tf.zeros(shape_ref, dtype=dtype)
    target_xmin = tf.zeros(shape_ref, dtype=dtype)
    target_ymax = tf.ones(shape_ref, dtype=dtype)
    target_xmax = tf.ones(shape_ref, dtype=dtype)

    def condition(i, anchor_labels, anchor_scores, target_ymin,
                  target_xmin, target_ymax, target_xmax):
        # Loop while there are ground-truth boxes left to process.
        cond = tf.less(i, tf.shape(gt_labels))
        return cond[0]

    def body(i, anchor_labels, anchor_scores, target_ymin,
             target_xmin, target_ymax, target_xmax):
        label = gt_labels[i]
        bbox = gt_bboxes[i]
        jaccard = jaccard_between_anchors_and_gt(anchors, bbox)
        # An anchor is (re)assigned to this gt box when the overlap improves
        # on its best match so far AND exceeds the matching threshold.
        # BUG FIX: the threshold used to be compared against anchor_scores,
        # which are initialized to zero, so the combined mask was always
        # False and no anchor could ever be matched.
        mask = tf.greater(jaccard, anchor_scores)
        mask = tf.logical_and(mask, tf.greater(jaccard, matching_threshold))
        mask = tf.logical_and(mask, label < ssd_params.num_classes)
        mask_int = tf.cast(mask, tf.int64)
        mask_float = tf.cast(mask, dtype)
        # Blend the new assignment into the running per-anchor state.
        anchor_labels = mask_int * label + (1 - mask_int) * anchor_labels
        anchor_scores = tf.where(mask, jaccard, anchor_scores)
        target_ymin = mask_float * bbox[0] + (1 - mask_float) * target_ymin
        target_xmin = mask_float * bbox[1] + (1 - mask_float) * target_xmin
        target_ymax = mask_float * bbox[2] + (1 - mask_float) * target_ymax
        target_xmax = mask_float * bbox[3] + (1 - mask_float) * target_xmax
        return [i + 1, anchor_labels, anchor_scores, target_ymin, target_xmin,
                target_ymax, target_xmax]

    i = 0
    [i, anchor_labels, anchor_scores, target_ymin, target_xmin,
     target_ymax, target_xmax] = tf.while_loop(condition, body,
                                               [i, anchor_labels, anchor_scores,
                                                target_ymin, target_xmin,
                                                target_ymax, target_xmax])
    # Convert matched corners to center/size, then to scaled offsets
    # relative to the anchors (the standard SSD box encoding).
    target_y = (target_ymin + target_ymax) / 2
    target_x = (target_xmin + target_xmax) / 2
    target_h = target_ymax - target_ymin
    target_w = target_xmax - target_xmin
    devi_y = (target_y - y_ref) / h_ref / ssd_params.prior_variance[0]
    devi_x = (target_x - x_ref) / w_ref / ssd_params.prior_variance[1]
    devi_h = tf.log(target_h / h_ref) / ssd_params.prior_variance[2]
    devi_w = tf.log(target_w / w_ref) / ssd_params.prior_variance[3]
    target_loc = tf.stack([devi_y, devi_x, devi_h, devi_w], axis=-1)
    return anchor_labels, anchor_scores, target_loc
def encode_gtbboxes_from_all_layers(ssd_params,
                                    anchors_cords,
                                    gt_labels,
                                    gt_bboxes,
                                    matching_threshold=0.5,
                                    dtype=tf.float32,
                                    scope='gtbboxes_encoder'):
    """Encode gt labels and bboxes against the anchors of every SSD layer.

    Delegates to encode_gtbboxes_from_one_layer for each feature-map layer
    and collects the per-layer results into parallel lists.

    Arguments:
        anchors_cords: mapping layer name -> that layer's anchor grids.
        gt_labels: 1D tensor(int64) containing gt labels.
        gt_bboxes: Nx4 tensor(float) with bboxes relative coordinates.
        matching_threshold: Threshold for positive match with gt bboxes.
    """
    with tf.name_scope(scope):
        all_labels, all_scores, all_locs = [], [], []
        for layer in ssd_params.featmap_layers:
            with tf.name_scope('gtbboxes_encoder_%s' % layer):
                layer_labels, layer_scores, layer_loc = \
                    encode_gtbboxes_from_one_layer(ssd_params,
                                                   anchors_cords[layer],
                                                   gt_labels,
                                                   gt_bboxes,
                                                   matching_threshold,
                                                   dtype)
                all_labels.append(layer_labels)
                all_scores.append(layer_scores)
                all_locs.append(layer_loc)
        return all_labels, all_scores, all_locs
def jaccard_between_anchors_and_gt(anchors, bbox):
    """IoU between every anchor of one layer and a single ground-truth box.

    Arguments:
        anchors: anchors coordinates of one layer: [ymin, xmin, ymax, xmax],
            shape: (4 x h x w x num_anchors).
        bbox: a box coordinates: [ymin, xmin, ymax, xmax], shape: (4,).
    """
    ymin, xmin, ymax, xmax = anchors
    # Intersection rectangle; sides are clamped at zero so disjoint boxes
    # contribute no area.
    top = tf.maximum(ymin, bbox[0])
    left = tf.maximum(xmin, bbox[1])
    bottom = tf.minimum(ymax, bbox[2])
    right = tf.minimum(xmax, bbox[3])
    inter_h = tf.maximum(bottom - top, 0.0)
    inter_w = tf.maximum(right - left, 0.0)
    inter_vol = tf.multiply(inter_h, inter_w)
    # |A ∪ B| = |A| + |B| - |A ∩ B|
    anchors_vol = (ymax - ymin) * (xmax - xmin)
    bbox_vol = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
    union_vol = anchors_vol - inter_vol + bbox_vol
    # Guard against division by zero for degenerate (empty) unions.
    return tf.where(tf.greater(union_vol, 0.),
                    tf.divide(inter_vol, union_vol),
                    tf.zeros_like(inter_vol), name='jaccard')
def abs_smooth_L1(x):
    """Smooth-L1 (Huber) loss: 0.5*x^2 where |x| < 1, |x| - 0.5 elsewhere."""
    absx = tf.abs(x)
    return tf.where(tf.less(absx, 1), 0.5 * tf.pow(x, 2), absx - 0.5)
def decode_bboxes_from_all_layer(locs_pred, anchors,
                                 prior_scaling=[0.1, 0.1, 0.2, 0.2],
                                 scope='ssd_bboxes_decode'):
    """Invert the SSD box encoding: predicted offsets + default anchors ->
    relative corner coordinates [ymin, xmin, ymax, xmax]."""
    with tf.name_scope(scope):
        y_ref, x_ref, h_ref, w_ref = anchors
        # Center/size from the regression offsets (inverse of the encoder).
        cy = y_ref + locs_pred[:, :, 0] * h_ref * prior_scaling[0]
        cx = x_ref + locs_pred[:, :, 1] * w_ref * prior_scaling[1]
        height = h_ref * tf.exp(locs_pred[:, :, 2] * prior_scaling[2])
        width = w_ref * tf.exp(locs_pred[:, :, 3] * prior_scaling[3])
        # Convert back to corner coordinates.
        half_h = height / 2
        half_w = width / 2
        return tf.stack([cy - half_h, cx - half_w,
                         cy + half_h, cx + half_w], axis=-1)
def select_detected_bboxes_every_class(predictions, localizations, num_classes,
                                       select_threshold=0.01,
                                       scope='detected_bboxes_select_every_class'):
    """Zero out detections scoring below *select_threshold*, per class.

    Arguments:
        predictions: batch_size x -1 x num_classes Tensor.
        localizations: batch_size x -1 x 4 Tensor.
    Returns:
        Two dicts keyed by class id (background class 0 excluded): the
        thresholded scores and the correspondingly masked boxes.
    """
    with tf.name_scope(scope, values=[predictions, localizations]):
        scores_by_class = {}
        bboxes_by_class = {}
        for cls in range(1, num_classes):
            cls_scores = predictions[:, :, cls]
            keep = tf.cast(tf.greater_equal(cls_scores, select_threshold),
                           cls_scores.dtype)
            scores_by_class[cls] = cls_scores * keep
            bboxes_by_class[cls] = localizations * tf.expand_dims(keep, axis=-1)
        return scores_by_class, bboxes_by_class
def select_detected_bboxes_all_classes(predictions, localizations,
                                       num_classes, select_threshold=0.01,
                                       scope='detected_bboxes_select_all_class'):
    """Threshold detections, keeping each box only in its argmax class.

    Arguments:
        predictions: batch_size x -1 x num_classes Tensor.
        localizations: batch_size x -1 x 4 Tensor.
    Returns:
        Two dicts keyed by class id (background class 0 excluded): the
        thresholded scores and the correspondingly masked boxes.
    """
    with tf.name_scope(scope, values=[predictions, localizations]):
        # Keep only the per-box maximum over the foreground classes
        # (background column 0 is excluded from the max).
        best = tf.reduce_max(predictions[:, :, 1:], axis=-1)
        is_best = tf.equal(predictions, tf.expand_dims(best, axis=-1))
        predictions = predictions * tf.cast(is_best, predictions.dtype)
        scores_by_class = {}
        bboxes_by_class = {}
        for cls in range(1, num_classes):
            cls_scores = predictions[:, :, cls]
            keep = tf.cast(tf.greater_equal(cls_scores, select_threshold),
                           cls_scores.dtype)
            scores_by_class[cls] = cls_scores * keep
            bboxes_by_class[cls] = localizations * tf.expand_dims(keep, axis=-1)
        return scores_by_class, bboxes_by_class
| {"/datasets/pascal2012.py": ["/utils/dataset_util.py"], "/datasets/voc_to_tfrecords.py": ["/utils/dataset_util.py", "/datasets/pascal2012.py"], "/eval_ssd_network.py": ["/utils/bbox_util.py"]} |
62,151 | silenove/ssd_vgg-Tensorflow | refs/heads/master | /preprocessing/preprocessing_factory.py | """Contains a factory for building various models."""
import tensorflow as tf
from preprocessing import ssd_vgg_preprocess
slim = tf.contrib.slim
# Registry mapping a model name to the module that implements its
# preprocess_image(image, labels, bboxes, ...) function.
preprocessing_fn_map = {
    'ssd_vgg_300': ssd_vgg_preprocess
}
def get_preprocessing(name):
    """Return the preprocess_image(image, labels, bboxes, ...) callable
    registered for the model *name*.

    Raises:
        ValueError: if *name* has no registered preprocessing module.
    """
    # Membership test on the dict itself; no need to build the keys view.
    if name not in preprocessing_fn_map:
        raise ValueError('Preprocessing name [%s] was not recognized.' % name)
    return preprocessing_fn_map[name].preprocess_image
| {"/datasets/pascal2012.py": ["/utils/dataset_util.py"], "/datasets/voc_to_tfrecords.py": ["/utils/dataset_util.py", "/datasets/pascal2012.py"], "/eval_ssd_network.py": ["/utils/bbox_util.py"]} |
62,152 | silenove/ssd_vgg-Tensorflow | refs/heads/master | /preprocessing/ssd_vgg_preprocess.py | """Preprocess images and bounding boxes for detection."""
import functools
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from utils import bbox_util
slim = tf.contrib.slim
# Minimum overlap a gt box must keep with a random crop to be retained.
BBOX_CROP_OVERLAP = 0.5
# Minimum fraction of a sampled gt box the random crop must cover.
MIN_OBJECT_COVERED = 0.25
# Default (height, width) used by preprocess_image.
EVAL_SIZE = (300, 300)
def tf_summary_image(image, bboxes, name='image'):
    """Add an image summary showing *image* with *bboxes* drawn on top."""
    # draw_bounding_boxes expects batched inputs, so add a leading axis.
    batched_image = tf.expand_dims(image, 0)
    batched_bboxes = tf.expand_dims(bboxes, 0)
    annotated = tf.image.draw_bounding_boxes(batched_image, batched_bboxes)
    tf.summary.image(name, annotated)
def normalize_image(image, original_minval, original_maxval, target_minval,
                    target_maxval):
    """Normalizes pixel values in the image.

    Linearly remaps pixel values from [original_minval, original_maxval]
    to [target_minval, target_maxval].
    """
    with tf.name_scope('NormalizeImage', values=[image]):
        lo_in = float(original_minval)
        hi_in = float(original_maxval)
        lo_out = float(target_minval)
        hi_out = float(target_maxval)
        image = tf.to_float(image)
        # Shift the source range to zero, rescale, then shift to the target.
        image = tf.subtract(image, lo_in)
        image = tf.multiply(image, (hi_out - lo_out) / (hi_in - lo_in))
        return tf.add(image, lo_out)
def apply_with_random_selector(x, func, num_cases):
    """Computes func(x, sel) with sel sampled from [0, ..., num_cases - 1].

    All num_cases branches are built into the graph, but the real tensor
    is routed through only the randomly selected one via switch/merge.
    """
    sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
    # Pass the real x only to one of the func calls.
    # switch() yields (output_false, output_true); index [1] is the branch
    # taken when sel == case, and merge() returns the single branch that
    # actually produced a value.
    return control_flow_ops.merge([func(
        control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
        for case in range(num_cases)])[0]
def random_flip_left_right(image, bboxes, seed=None):
    """With probability 0.5, mirror *image* horizontally and adjust *bboxes*."""
    def _mirror_bboxes(boxes):
        # x-coordinates reflect around 1: new xmin = 1 - old xmax and
        # new xmax = 1 - old xmin; y-coordinates are unchanged.
        return tf.stack([boxes[:, 0], 1 - boxes[:, 3],
                         boxes[:, 2], 1 - boxes[:, 1]], axis=-1)
    with tf.name_scope('random_flip_left_right'):
        do_flip = tf.less(tf.random_uniform([], 0., 1., seed=seed), 0.5)
        image = tf.cond(do_flip,
                        lambda: tf.image.flip_left_right(image),
                        lambda: image)
        bboxes = tf.cond(do_flip,
                         lambda: _mirror_bboxes(bboxes),
                         lambda: bboxes)
        return image, bboxes
def distort_color(image, color_ordering=0, fast_mode=True, scope=None):
    """Distort the color of a tensor image.

    Arguments:
        image: 3-D Tensor containing single image in [0,1].
        color_ordering: Python int, a type of distortion (valid values: 0-3).
        fast_mode: Avoids slower ops (random_hue and random_contrast)
    Returns:
        3-D Tensor color-distorted image on range [0, 1]
    Raises:
        ValueError: in slow mode, if color_ordering is outside [0, 3].
    """
    # The four elementary distortions; the ordering tables below decide
    # which ones run and in what sequence.
    def _brightness(img):
        return tf.image.random_brightness(img, max_delta=32. / 255.)
    def _saturation(img):
        return tf.image.random_saturation(img, lower=0.8, upper=1.25)
    def _hue(img):
        return tf.image.random_hue(img, max_delta=0.2)
    def _contrast(img):
        return tf.image.random_contrast(img, lower=0.8, upper=1.25)

    with tf.name_scope(scope, 'distort_color', [image]):
        if fast_mode:
            # Fast mode only mixes brightness and saturation.
            if color_ordering == 0:
                ops = (_brightness, _saturation)
            else:
                ops = (_saturation, _brightness)
        else:
            orderings = {
                0: (_brightness, _saturation, _hue, _contrast),
                1: (_saturation, _brightness, _contrast, _hue),
                2: (_contrast, _hue, _brightness, _saturation),
                3: (_hue, _saturation, _contrast, _brightness),
            }
            if color_ordering not in orderings:
                raise ValueError('color_ordering must be in [0, 3]')
            ops = orderings[color_ordering]
        for op in ops:
            image = op(image)
        # The distortions can leave the range; clamp back to [0, 1].
        return tf.clip_by_value(image, 0., 1.)
def _rgb_to_grayscale(images, name=None):
    """Converts one or more images from RGB to Grayscale."""
    with tf.name_scope(name, 'rgb_to_grayscale', [images]) as name:
        images = tf.convert_to_tensor(images, name='images')
        orig_dtype = images.dtype
        flt_image = tf.image.convert_image_dtype(images, tf.float32)
        # Luma coefficients for the R, G and B channels.
        rgb_weights = [0.2989, 0.5870, 0.1140]
        # Reduce over the last axis (channels), keeping it as size 1.
        channel_axis = tf.expand_dims(tf.rank(images) - 1, 0)
        gray_float = tf.reduce_sum(flt_image * rgb_weights, channel_axis,
                                   keep_dims=True)
        gray_float.set_shape(images.get_shape()[:-1].concatenate([1]))
        return tf.image.convert_image_dtype(gray_float, orig_dtype, name=name)
def random_rgb_to_gray(image, probability=0.1, seed=None):
    """With the given probability, replace *image* by its grayscale version.

    Arguments:
        image: rank 3 float32 tensor contains 1 image -> [height, width,
            channels] with pixel values varying between [0, 255].
        probability: the probability of returning a grayscale image. [0, 1]
    """
    def _as_gray(img):
        # Collapse to a single channel, then tile back to 3 channels so the
        # output shape matches the RGB input.
        return tf.image.grayscale_to_rgb(_rgb_to_grayscale(img))
    with tf.name_scope('RandomRGBtoGray', values=[image]):
        draw = tf.random_uniform([], minval=0., maxval=1., seed=seed)
        return tf.cond(tf.greater(draw, probability),
                       lambda: image,
                       lambda: _as_gray(image))
def distorted_bounding_box_crop(image, labels, bboxes, bbox=None,
                                min_object_covered=0.3,
                                aspect_ratio_range=(0.9, 1.1),
                                area_range=(0.1, 1.0),
                                max_attempts=200,
                                clip_bboxes=True,
                                scope=None):
    """Generates cropped_image using a one of the bboxes randomly distorted.

    Arguments:
        image: 3-D Tensor of image (it will be converted to float in [0, 1]).
        labels: 1-D tensor of object labels; filtered alongside *bboxes*.
        bboxes: Nx4 tensor of relative box coordinates, remapped into the crop.
        bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
            where each coordinate is [0, 1) and the coordinates are arranged as
            [ymin, xmin, ymax, xmax]. If num_boxes is 0 then it would use the
            whole image.
        min_object_covered: minimum fraction of *bbox* the crop must cover.
        aspect_ratio_range / area_range / max_attempts: forwarded to
            tf.image.sample_distorted_bounding_box.
        clip_bboxes: whether to clip remapped boxes to the crop window.
    Returns:
        (cropped_image, labels, bboxes) with annotations filtered to the crop.
    """
    with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]):
        # Without a constraint box, allow the sampler to use the whole image.
        if bbox is None:
            bbox = tf.constant([0., 0., 1., 1.], dtype=tf.float32, shape=[1, 1, 4])
        bbox_begin, bbox_size, distort_bbox = tf.image.sample_distorted_bounding_box(
            tf.shape(image),
            bounding_boxes=bbox,
            min_object_covered=min_object_covered,
            aspect_ratio_range=aspect_ratio_range,
            area_range=area_range,
            max_attempts=max_attempts,
            use_image_if_no_bounding_boxes=True
        )
        # Crop the sampled window out of the image; the channel count is
        # static, the spatial dims are not.
        cropped_image = tf.slice(image, bbox_begin, bbox_size)
        cropped_image.set_shape([None, None, image.get_shape()[2]])
        # distort_bbox has shape [1, 1, 4]; take the single sampled window.
        distort_bbox = distort_bbox[0, 0]
        # Re-express gt boxes in the crop's coordinate frame, then drop the
        # ones whose remaining overlap falls below BBOX_CROP_OVERLAP.
        bboxes = bbox_util.bboxes_resize(distort_bbox, bboxes)
        labels, bboxes = bbox_util.bboxes_filter_overlap(labels, bboxes,
                                                         threshold=BBOX_CROP_OVERLAP,
                                                         assign_negative=False)
        if clip_bboxes:
            bboxes = bbox_util.bboxes_clip(distort_bbox, bboxes)
        return cropped_image, labels, bboxes
def preprocess_for_train(image, labels, bboxes, out_height, out_width, bbox,
                         fast_mode, data_format,
                         scope='ssd_preprocessing_train'):
    """Preprocess the given image for training.

    Applies random grayscale, a distorted crop (which filters labels and
    boxes), a resize, a random horizontal flip and random color distortion,
    then rescales pixel values to [0, 255].

    Arguments:
        bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
            used in image distorted crop.
    Returns:
        (image, labels, bboxes) after all distortions.
    """
    with tf.name_scope(scope, 'ssd_preprocess_train', [image, labels, bboxes]):
        if image.get_shape().ndims != 3:
            raise ValueError('Input must be of size [height, width, channels].')
        if image.dtype != tf.float32:
            image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        tf_summary_image(image, bboxes, 'image_with_bboxes')
        image = random_rgb_to_gray(image, 0.1)
        dst_image, dst_labels, dst_bboxes = distorted_bounding_box_crop(
            image, labels, bboxes, bbox,
            min_object_covered=MIN_OBJECT_COVERED)
        dst_image = tf.image.resize_images(dst_image, [out_height, out_width])
        dst_image, dst_bboxes = random_flip_left_right(dst_image, dst_bboxes)
        dst_image = apply_with_random_selector(
            dst_image,
            lambda x, order: distort_color(x, order, fast_mode),
            num_cases=4)
        image = dst_image * 255.
        # BUG FIX: transpose to channel-first only after all the HWC-based
        # ops above, and use the 'NCHW' spelling used by the eval path
        # (the condition previously tested the misspelled 'NHCW').
        if data_format == 'NCHW':
            image = tf.transpose(image, perm=(2, 0, 1))
        # BUG FIX: return the cropped/filtered/flipped annotations instead of
        # the originals, so labels and boxes stay consistent with the image.
        return image, dst_labels, dst_bboxes
def preprocess_for_eval(image, labels, bboxes,
                        out_height, out_width,
                        data_format,
                        difficults,
                        scope='ssd_preprocessing_eval'):
    """Preprocess an image for evaluation: resize, drop "difficult" objects,
    and optionally convert to channel-first layout."""
    with tf.name_scope(scope, 'ssd_preprocessing_eval'):
        if image.get_shape().ndims != 3:
            raise ValueError('Input must be of size [height, width, channels].')
        image = tf.image.resize_images(image, [out_height, out_width],
                                       method=tf.image.ResizeMethod.BILINEAR,
                                       align_corners=False)
        # BUG FIX: transpose AFTER resizing -- resize_images interprets the
        # first two dimensions as [height, width], which only holds in HWC;
        # transposing first would resize the channel axis instead.
        if data_format == 'NCHW':
            image = tf.transpose(image, perm=[2, 0, 1])
        # Evaluation convention: ignore objects annotated as difficult.
        if difficults is not None:
            mask = tf.logical_not(tf.cast(difficults, tf.bool))
            labels = tf.boolean_mask(labels, mask)
            bboxes = tf.boolean_mask(bboxes, mask)
        return image, labels, bboxes
def preprocess_image(image,
                     labels,
                     bboxes,
                     out_height=EVAL_SIZE[0], out_width=EVAL_SIZE[1],
                     bbox=None,
                     fast_mode=False,
                     data_format='NHWC',
                     difficults=None,
                     is_training=False):
    """Dispatch to the train or eval preprocessing pipeline."""
    if not is_training:
        return preprocess_for_eval(image, labels, bboxes,
                                   out_height, out_width,
                                   data_format, difficults)
    return preprocess_for_train(image, labels, bboxes, out_height, out_width,
                                bbox, fast_mode, data_format)
| {"/datasets/pascal2012.py": ["/utils/dataset_util.py"], "/datasets/voc_to_tfrecords.py": ["/utils/dataset_util.py", "/datasets/pascal2012.py"], "/eval_ssd_network.py": ["/utils/bbox_util.py"]} |
62,153 | silenove/ssd_vgg-Tensorflow | refs/heads/master | /datasets/pascal2012.py | """Provide the Dataset of Pascal Voc dataset."""
import os
import tensorflow as tf
from utils.dataset_util import read_label_file
slim = tf.contrib.slim
# Mapping: VOC class name -> (integer label id, broad category name).
# Label 0 is reserved for the background class.
VOC_LABELS = {
    'none': (0, 'Background'),
    'aeroplane': (1, 'Vehicle'),
    'bicycle': (2, 'Vehicle'),
    'bird': (3, 'Animal'),
    'boat': (4, 'Vehicle'),
    'bottle': (5, 'Indoor'),
    'bus': (6, 'Vehicle'),
    'car': (7, 'Vehicle'),
    'cat': (8, 'Animal'),
    'chair': (9, 'Indoor'),
    'cow': (10, 'Animal'),
    'diningtable': (11, 'Indoor'),
    'dog': (12, 'Animal'),
    'horse': (13, 'Animal'),
    'motorbike': (14, 'Vehicle'),
    'person': (15, 'Person'),
    'pottedplant': (16, 'Indoor'),
    'sheep': (17, 'Animal'),
    'sofa': (18, 'Indoor'),
    'train': (19, 'Vehicle'),
    'tvmonitor': (20, 'Indoor'),
}
# Total number of images in the dataset (train + validation).
TOTAL_SIZE = 17125
# Number of examples assigned to each split; must sum to TOTAL_SIZE.
SPLIT_TO_SIZES = {'train': 14125, 'validation': 3000}
NUM_CLASSES = 21  # include background
# Human-readable description of every decoded item, passed to slim's Dataset.
_ITEMS_TO_DESCRIPTIONS = {
    'image': 'A color image of varying size.',
    'image/height': 'The height of the image in pixel.',
    'image/width': 'The width of the image in pixel.',
    'image/channels': 'The channels of the image.',
    'object/bbox': 'The bboxes(ymin, xmin, ymax, xmax) of all objects in the image.',
    'object/label': 'The labels of all objects in the image.',
    'object/difficult': 'tf.int64, the difficulties to recognize every object in the image.',
    'object/truncated': 'tf.int64, the truncation of all objects in the image.',
}
def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
    """Get a dataset with instructions for reading PASCAL VOC dataset.

    Arguments:
        split_name: 'train' or 'validation'.
        dataset_dir: directory holding the TFRecord files and label file.
        file_pattern: optional pattern with a %s placeholder for the split.
        reader: optional reader class; defaults to tf.TFRecordReader.
    Raises:
        ValueError: for an unknown split name.
    """
    # Validate the split before touching the filesystem.
    if split_name not in ['train', 'validation']:
        raise ValueError('split name %s is not recognized.' % split_name)
    if not file_pattern:
        file_pattern = 'data_%s.tfrecord'
    data_sources = os.path.join(dataset_dir, file_pattern % split_name)
    if reader is None:
        reader = tf.TFRecordReader
    # How each serialized Example is parsed ...
    feature_spec = {
        'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
        'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'),
        'image/height': tf.FixedLenFeature([1], tf.int64),
        'image/width': tf.FixedLenFeature([1], tf.int64),
        'image/channels': tf.FixedLenFeature([1], tf.int64),
        'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/label': tf.VarLenFeature(dtype=tf.int64),
        'image/object/bbox/difficult': tf.VarLenFeature(dtype=tf.int64),
        'image/object/bbox/truncated': tf.VarLenFeature(dtype=tf.int64),
    }
    # ... and how the parsed features map onto decoded items.
    item_handlers = {
        'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),
        'image/height': slim.tfexample_decoder.Tensor('image/height'),
        'image/width': slim.tfexample_decoder.Tensor('image/width'),
        'image/channels': slim.tfexample_decoder.Tensor('image/channels'),
        'object/bbox': slim.tfexample_decoder.BoundingBox(
            ['ymin', 'xmin', 'ymax', 'xmax'], 'image/object/bbox/'
        ),
        'object/label': slim.tfexample_decoder.Tensor('image/object/bbox/label'),
        'object/difficult': slim.tfexample_decoder.Tensor('image/object/bbox/difficult'),
        'object/truncated': slim.tfexample_decoder.Tensor('image/object/bbox/truncated'),
    }
    example_decoder = slim.tfexample_decoder.TFExampleDecoder(feature_spec,
                                                              item_handlers)
    return slim.dataset.Dataset(
        data_sources=data_sources,
        reader=reader,
        decoder=example_decoder,
        num_samples=SPLIT_TO_SIZES[split_name],
        items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,
        num_classes=NUM_CLASSES,
        labels_to_names=read_label_file(dataset_dir),
    )
| {"/datasets/pascal2012.py": ["/utils/dataset_util.py"], "/datasets/voc_to_tfrecords.py": ["/utils/dataset_util.py", "/datasets/pascal2012.py"], "/eval_ssd_network.py": ["/utils/bbox_util.py"]} |
62,154 | silenove/ssd_vgg-Tensorflow | refs/heads/master | /datasets/voc_to_tfrecords.py | """Convert raw PASCAL VOC dataset to TFRecord for object detection.
The Example proto contains the following fields:
image/encoded: string, containing JPEG image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/format: string, specifying the format, like 'JPEG'
image/object/bbox/xmin: list of float specifying the bboxes.
image/object/bbox/xmax: list of float specifying the bboxes.
image/object/bbox/ymin: list of float specifying the bboxes.
image/object/bbox/ymax: list of float specifying the bboxes.
image/object/bbox/label: list of integer specifying the classification index.
"""
import os
import sys
import random
import numpy as np
import tensorflow as tf
from xml.etree import ElementTree
from utils.dataset_util import write_label_file
from datasets.pascal2012 import VOC_LABELS, SPLIT_TO_SIZES
from utils.dataset_util import int64_list_feature, bytes_list_feature, float_list_feature, \
bytes_feature, int64_feature
# Sub-directories of the VOC meta directory holding annotations and images.
DIRECTORY_ANNOTATIONS = 'Annotations/'
DIRECTORY_IMAGES = 'JPEGImages/'
# The number of images(total 17125) in the validation set.
_NUM_VALIDATION = SPLIT_TO_SIZES['validation']
# Seed for repeatability
# NOTE(review): main() calls random.seed() with NO argument, so this seed
# is never used and the split is not actually repeatable — confirm intent.
_RANDOM_SEED = 123
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('dataset_dir', '', 'The dataset directory where the dataset is stored.')
tf.app.flags.DEFINE_string('meta_directory', '', 'The directory containing images and annotations dir')
def _image_to_tfexample(image_name, annotation_name):
    """Generate a tf example by image and annotation file.

    Arguments:
        image_name: path of the .jpg image file.
        annotation_name: path of the matching VOC XML annotation file.
    Returns:
        A tf.train.Example with the encoded image, its shape, and one entry
        per object (normalized bbox, label, difficult/truncated flags).
    """
    image_data = tf.gfile.FastGFile(image_name, 'rb').read()
    tree = ElementTree.parse(annotation_name)
    root = tree.getroot()
    # image shape
    size = root.find('size')
    height = int(size.find('height').text)
    width = int(size.find('width').text)
    channels = int(size.find('depth').text)
    # Per-object annotations; bbox coordinates are normalized to [0, 1].
    xmin = []
    xmax = []
    ymin = []
    ymax = []
    labels = []
    labels_text = []
    difficult = []
    truncated = []
    for obj in root.findall('object'):
        label_name = obj.find('name').text
        labels.append(int(VOC_LABELS[label_name][0]))
        labels_text.append(label_name.encode('ascii'))
        # BUG FIX: an ElementTree Element with no children is falsy, so the
        # previous `if obj.find('difficult'):` was False even when the tag
        # exists — the recorded flag was always 0. Compare to None instead.
        difficult_node = obj.find('difficult')
        if difficult_node is not None:
            difficult.append(int(difficult_node.text))
        else:
            difficult.append(0)
        truncated_node = obj.find('truncated')
        if truncated_node is not None:
            truncated.append(int(truncated_node.text))
        else:
            truncated.append(0)
        bbox = obj.find('bndbox')
        xmin.append(float(bbox.find('xmin').text) / width)
        xmax.append(float(bbox.find('xmax').text) / width)
        ymin.append(float(bbox.find('ymin').text) / height)
        ymax.append(float(bbox.find('ymax').text) / height)
    example = tf.train.Example(features=tf.train.Features(feature={
        'image/encoded': bytes_feature(image_data),
        'image/format': bytes_feature(b'JPEG'),
        'image/height': int64_feature(height),
        'image/width': int64_feature(width),
        'image/channels': int64_feature(channels),
        'image/object/bbox/xmin': float_list_feature(xmin),
        'image/object/bbox/xmax': float_list_feature(xmax),
        'image/object/bbox/ymin': float_list_feature(ymin),
        'image/object/bbox/ymax': float_list_feature(ymax),
        'image/object/bbox/label': int64_list_feature(labels),
        'image/object/bbox/text': bytes_list_feature(labels_text),
        'image/object/bbox/difficult': int64_list_feature(difficult),
        'image/object/bbox/truncated': int64_list_feature(truncated),
    }))
    return example
def _get_dataset_name(dataset_dir, split_name):
    """Return the TFRecord file path for the given split inside dataset_dir."""
    return os.path.join(dataset_dir, 'data_%s.tfrecord' % split_name)
def _dataset_exist(dataset_dir):
    """Return True when the TFRecord files for both splits already exist."""
    return all(
        tf.gfile.Exists(_get_dataset_name(dataset_dir, split))
        for split in ('train', 'validation'))
def _get_filenames(dataset_dir):
    """List the image base names (extension stripped) under the image directory.

    Args:
        dataset_dir: Directory containing the DIRECTORY_IMAGES subdirectory.

    Returns:
        List of file names with their extension removed.
    """
    image_dir = os.path.join(dataset_dir, DIRECTORY_IMAGES)
    # os.path.splitext drops the real extension instead of blindly removing
    # the last four characters (the old filename[:-4] broke for any
    # extension that is not exactly three letters, e.g. '.jpeg').
    return [os.path.splitext(filename)[0] for filename in os.listdir(image_dir)]
def _convert_dataset(split_name, filenames, dataset_dir, meta_dir):
    """Convert the given filenames to a TFRecord dataset."""
    assert split_name in ['train', 'validation']
    output_filename = _get_dataset_name(dataset_dir, split_name)
    total = len(filenames)
    with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
        for idx, basename in enumerate(filenames):
            # Progress indicator, rewritten in place on a single line.
            sys.stdout.write('\r>> Converting image %d/%d to %s dataset.' % (idx + 1, total, split_name))
            sys.stdout.flush()
            imagename = os.path.join(meta_dir, DIRECTORY_IMAGES, basename + '.jpg')
            anotname = os.path.join(meta_dir, DIRECTORY_ANNOTATIONS, basename + '.xml')
            # Skip entries whose image or annotation file is missing.
            if tf.gfile.Exists(imagename) and tf.gfile.Exists(anotname):
                example = _image_to_tfexample(imagename, anotname)
                tfrecord_writer.write(example.SerializeToString())
    sys.stdout.write('\n')
    sys.stdout.flush()
def main(_):
    """Run the VOC-to-TFRecord conversion operation.

    Creates the output directory if needed, shuffles the available samples,
    splits them into train/validation, writes one TFRecord per split and a
    labels file mapping class ids to names.
    """
    if not tf.gfile.Exists(FLAGS.dataset_dir):
        tf.gfile.MakeDirs(FLAGS.dataset_dir)
    if _dataset_exist(FLAGS.dataset_dir):
        # Fixed garbled user-facing message ("Existing without recreate files.").
        print('Dataset files already exist. Exiting without recreating them.')
        return
    # Invert VOC_LABELS (name -> (id, ...)) into id -> name for the labels file.
    classes_id_to_name = {value[0]: key for key, value in VOC_LABELS.items()}
    meta_filenames = _get_filenames(FLAGS.meta_directory)
    # Divide into training and validation after a random shuffle.
    random.seed()
    random.shuffle(meta_filenames)
    train_filenames = meta_filenames[_NUM_VALIDATION:]
    validation_filenames = meta_filenames[:_NUM_VALIDATION]
    # Convert the training and validation splits.
    _convert_dataset('train', train_filenames, FLAGS.dataset_dir, FLAGS.meta_directory)
    _convert_dataset('validation', validation_filenames, FLAGS.dataset_dir, FLAGS.meta_directory)
    # Write the id -> name labels file next to the records.
    write_label_file(classes_id_to_name, FLAGS.dataset_dir)
    print('\nFinished converting the PASCAL VOC dataset.')
if __name__ == '__main__':
    # tf.app.run parses FLAGS and then calls main().
    tf.app.run()
| {"/datasets/pascal2012.py": ["/utils/dataset_util.py"], "/datasets/voc_to_tfrecords.py": ["/utils/dataset_util.py", "/datasets/pascal2012.py"], "/eval_ssd_network.py": ["/utils/bbox_util.py"]} |
62,155 | silenove/ssd_vgg-Tensorflow | refs/heads/master | /utils/dataset_util.py | """Contain utilities for convert datasets."""
import os
import sys
import tensorflow as tf
LABELS_FILENAME = 'labels.txt'
def write_label_file(labels_to_class_names, dataset_dir, filename=LABELS_FILENAME):
    """Write the id:name class mapping to a labels file, one entry per line."""
    labels_filename = os.path.join(dataset_dir, filename)
    with tf.gfile.Open(labels_filename, 'w') as f:
        # One "<id>:<name>" line per class.
        for label, class_name in labels_to_class_names.items():
            f.write('%d:%s\n' % (label, class_name))
def read_label_file(dataset_dir, filename=LABELS_FILENAME):
    """Read the labels file back into an {id: class_name} dictionary."""
    labels_filename = os.path.join(dataset_dir, filename)
    with tf.gfile.Open(labels_filename, 'rb') as f:
        content = f.read().decode()
    labels_to_class_names = {}
    for line in content.split('\n'):
        if not line:
            # Skip blank lines (e.g. the trailing newline).
            continue
        # Split on the first ':' only; class names may not contain one, but
        # this mirrors the original index-based split exactly.
        label, _, class_name = line.partition(':')
        labels_to_class_names[int(label)] = class_name
    return labels_to_class_names
def int64_list_feature(value):
    # Wrap a list of ints as an Int64List tf.train.Feature.
    return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def int64_feature(value):
    # Wrap a single int as a one-element Int64List Feature.
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def bytes_list_feature(value):
    # Wrap a list of byte strings as a BytesList Feature.
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def bytes_feature(value):
    # Wrap a single byte string as a one-element BytesList Feature.
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def float_list_feature(value):
    # Wrap a list of floats as a FloatList Feature.
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))
| {"/datasets/pascal2012.py": ["/utils/dataset_util.py"], "/datasets/voc_to_tfrecords.py": ["/utils/dataset_util.py", "/datasets/pascal2012.py"], "/eval_ssd_network.py": ["/utils/bbox_util.py"]} |
62,156 | silenove/ssd_vgg-Tensorflow | refs/heads/master | /utils/bbox_util.py | """Bounding boxes methods."""
import numpy as np
import tensorflow as tf
def bboxes_clip(bbox_ref, bboxes, scope=None):
    """Clip bounding boxes to a reference box.

    Arguments:
        bbox_ref: Reference bounding box. Nx4 or 4 shaped-Tensor.
        bboxes: Bounding boxes to clip. Nx4 or 4 shaped-Tensor.
    """
    with tf.name_scope(scope, 'bboxes_clip'):
        ref_t = tf.transpose(bbox_ref)
        boxes_t = tf.transpose(bboxes)
        # Clamp each coordinate against the reference box.
        ymin = tf.maximum(boxes_t[0], ref_t[0])
        xmin = tf.maximum(boxes_t[1], ref_t[1])
        ymax = tf.minimum(boxes_t[2], ref_t[2])
        xmax = tf.minimum(boxes_t[3], ref_t[3])
        # Double check: guarantee min <= max even for boxes fully outside.
        ymin = tf.minimum(ymin, ymax)
        xmin = tf.minimum(xmin, xmax)
        return tf.transpose(tf.stack([ymin, xmin, ymax, xmax], axis=0))
def bboxes_resize(bbox_ref, bboxes, name=None):
    """Resize bounding boxes based on a reference bounding box. Useful for updating
    a collection of boxes after cropping an image.
    """
    with tf.name_scope(name, 'bboxes_resize'):
        # Translate so the reference box corner becomes the origin.
        offset = tf.stack([bbox_ref[0], bbox_ref[1], bbox_ref[0], bbox_ref[1]])
        shifted = bboxes - offset
        # Rescale by the reference box height/width.
        ref_h = bbox_ref[2] - bbox_ref[0]
        ref_w = bbox_ref[3] - bbox_ref[1]
        scale = tf.stack([ref_h, ref_w, ref_h, ref_w])
        return shifted / scale
def bboxes_sort_all_classes(dict_scores, dict_bboxes, top_k=400, scope=None):
    """Sort bounding boxes by decreasing score and keep only the top_k per class.

    Arguments:
        dict_scores: Dictionary, item - batch_size x -1 Tensor float scores.
        dict_bboxes: Dictionary, item - batch_size x -1 x 4 Tensor bounding boxes.

    Returns:
        Two dictionaries keyed like the inputs with per-class sorted scores
        (batch x top_k) and the correspondingly reordered boxes.
    """
    def gather(bboxes, idxes):
        # Reorder one batch item's boxes by the sorted-score indices.
        bboxes_gather = tf.gather(bboxes, idxes)
        return bboxes_gather
    def bboxes_sort_one_class(scores, bboxes, top_k):
        # top_k sorts scores per batch row; the boxes follow via map_fn so
        # each batch element is gathered with its own index vector.
        scores_sorted, idxes = tf.nn.top_k(scores, k=top_k, sorted=True)
        bboxes_sorted = tf.map_fn(lambda x: gather(x[0], x[1]),
                                  [bboxes, idxes],
                                  dtype=bboxes.dtype,
                                  parallel_iterations=10,
                                  back_prop=False,
                                  swap_memory=False,
                                  infer_shape=True)
        return scores_sorted, bboxes_sorted
    with tf.name_scope(scope, 'detected_bboxes_sort', values=[dict_scores, dict_bboxes]):
        dict_scores_sorted = {}
        dict_bboxes_sorted = {}
        for cls in dict_scores.keys():
            scores_sorted, bboxes_sorted = bboxes_sort_one_class(dict_scores[cls],
                                                                 dict_bboxes[cls],
                                                                 top_k)
            dict_scores_sorted[cls] = scores_sorted
            dict_bboxes_sorted[cls] = bboxes_sorted
        return dict_scores_sorted, dict_bboxes_sorted
def bboxes_nms_all_classes(dict_scores_sorted, dict_bboxes_sorted, batch_size,
                           nms_threshold=0.5, keep_top_k=200,
                           scope=None):
    """Apply non-maximum suppression to bounding boxes, per class and per batch item.

    Arguments:
        dict_scores_sorted: Dictionary (class: scores), scores - batch x top_k.
        dict_bboxes_sorted: Dictionary (class: bboxes), bboxes - batch x top_k x 4.

    Returns:
        Dictionaries of NMS-filtered scores (batch x keep_top_k) and boxes
        (batch x keep_top_k x 4), padded to keep_top_k entries.
    """
    def bboxes_nms_one_class(scores_sorted, bboxes_sorted, batch_size,
                             nms_threshold, keep_top_k):
        scores_batches = []
        bboxes_batches = []
        # NMS is applied independently to each batch element (Python loop,
        # so batch_size must be a static int).
        for i in range(batch_size):
            # pad_to_max_output_size=True keeps the output shape static at
            # keep_top_k; the second return (valid count) is unused here.
            idxes, _ = tf.image.non_max_suppression_padded(bboxes_sorted[i],
                                                           scores_sorted[i],
                                                           keep_top_k,
                                                           nms_threshold,
                                                           pad_to_max_output_size=True)
            scores = tf.gather(scores_sorted[i], idxes)
            bboxes = tf.gather(bboxes_sorted[i], idxes)
            scores_batches.append(scores)
            bboxes_batches.append(bboxes)
        scores_batches = tf.stack(scores_batches, axis=0)
        bboxes_batches = tf.stack(bboxes_batches, axis=0)
        return scores_batches, bboxes_batches
    with tf.name_scope(scope, 'bboxes_nms_all_classes'):
        dict_scores_nms = {}
        dict_bboxes_nms = {}
        for cls in dict_scores_sorted.keys():
            scores_nms, bboxes_nms = bboxes_nms_one_class(dict_scores_sorted[cls],
                                                          dict_bboxes_sorted[cls],
                                                          batch_size,
                                                          nms_threshold,
                                                          keep_top_k)
            dict_scores_nms[cls] = scores_nms
            dict_bboxes_nms[cls] = bboxes_nms
        return dict_scores_nms, dict_bboxes_nms
def bboxes_filter_overlap(labels, bboxes, threshold=0.5, assign_negative=False,
                          scope=None):
    """Filter out bounding boxes based on (relative) overlap with reference
    box [0, 0, 1, 1]. Remove completely bounding boxes, or assign negative labels
    to the one outside.
    """
    with tf.name_scope(scope, 'bboxes_filter', [labels, bboxes]):
        ref = tf.constant([0, 0, 1, 1], dtype=bboxes.dtype)
        overlap = bboxes_intersection(ref, bboxes)
        keep = overlap > threshold
        if assign_negative:
            # Keep every box, but negate the label of the ones filtered out.
            labels = tf.where(keep, labels, -labels)
            return labels, bboxes
        # Drop both the labels and the boxes below the overlap threshold.
        return tf.boolean_mask(labels, keep), tf.boolean_mask(bboxes, keep)
def bboxes_intersection(bbox_ref, bboxes, name=None):
    """Compute relative intersection between a reference box and a collection of
    bounding boxes (intersection area divided by each box's own area).

    Arguments:
        bbox_ref: (N, 4) of (4,) Tensor with reference bounding boxes.
        bboxes: (N, 4) Tensor, collection of bounding boxes.
    """
    with tf.name_scope(name, 'bboxes_intersection'):
        boxes_t = tf.transpose(bboxes)
        ref_t = tf.transpose(bbox_ref)
        # Intersection rectangle coordinates.
        inter_ymin = tf.maximum(boxes_t[0], ref_t[0])
        inter_xmin = tf.maximum(boxes_t[1], ref_t[1])
        inter_ymax = tf.minimum(boxes_t[2], ref_t[2])
        inter_xmax = tf.minimum(boxes_t[3], ref_t[3])
        inter_h = tf.maximum(inter_ymax - inter_ymin, 0.)
        inter_w = tf.maximum(inter_xmax - inter_xmin, 0.)
        inter_vol = inter_h * inter_w
        bboxes_vol = (boxes_t[2] - boxes_t[0]) * (boxes_t[3] - boxes_t[1])
        # Return 0 for zero-area boxes instead of dividing by zero.
        return tf.where(tf.greater(bboxes_vol, 0.),
                        tf.divide(inter_vol, bboxes_vol),
                        tf.zeros_like(inter_vol), name='intersection')
def bboxes_jaccard(bbox_ref, bboxes, name=None):
    """Compute jaccard score (IoU) between a reference box and a collection of
    bounding boxes.
    """
    with tf.name_scope(name, 'bboxes_jaccard'):
        boxes_t = tf.transpose(bboxes)
        ref_t = tf.transpose(bbox_ref)
        # Intersection rectangle.
        inter_ymin = tf.maximum(boxes_t[0], ref_t[0])
        inter_xmin = tf.maximum(boxes_t[1], ref_t[1])
        inter_ymax = tf.minimum(boxes_t[2], ref_t[2])
        inter_xmax = tf.minimum(boxes_t[3], ref_t[3])
        inter_h = tf.maximum(inter_ymax - inter_ymin, 0.)
        inter_w = tf.maximum(inter_xmax - inter_xmin, 0.)
        inter_vol = inter_h * inter_w
        # union = area(a) + area(b) - intersection
        boxes_vol = (boxes_t[2] - boxes_t[0]) * (boxes_t[3] - boxes_t[1])
        ref_vol = (ref_t[2] - ref_t[0]) * (ref_t[3] - ref_t[1])
        union_vol = boxes_vol + ref_vol - inter_vol
        # Return 0 for degenerate (zero-union) pairs instead of dividing by zero.
        return tf.where(tf.greater(union_vol, 0.),
                        tf.divide(inter_vol, union_vol),
                        tf.zeros_like(inter_vol), name='jaccard')
def bboxes_matching(label, scores, bboxes, labels_gt, bboxes_gt,
                    difficults_gt, matching_threshold=0.5, scope=None):
    """Matching a collection of detected boxes with groundtruth values, single-inputs.

    For one class `label`, walks the detections in their given order (assumed
    sorted by score) and greedily matches each against the best-IoU groundtruth
    box of the same class; a groundtruth box can be matched at most once.

    Returns:
        num_bboxes_gt: count of non-difficult groundtruth boxes of this class.
        tp_match, fp_match: bool tensors shaped like `scores` marking each
            detection as true/false positive (difficult matches count as neither).
    """
    with tf.name_scope(scope, 'bboxes_matching_single', [scores, bboxes, labels_gt,
                                                         bboxes_gt, difficults_gt]):
        total_size = tf.size(scores)
        label = tf.cast(label, labels_gt.dtype)
        difficults_gt = tf.cast(difficults_gt, tf.bool)
        num_bboxes_gt = tf.count_nonzero(tf.logical_and(tf.equal(label, labels_gt),
                                                        tf.logical_not(difficults_gt)))
        # matching_gt[j] becomes True once groundtruth box j has been claimed.
        matching_gt = tf.zeros(tf.shape(labels_gt), dtype=tf.bool)
        range_gt = tf.range(tf.size(labels_gt), dtype=tf.int32)
        # True/False positive matching TensorArrays
        tensorarray_tp = tf.TensorArray(tf.bool, size=total_size, dynamic_size=False,
                                        infer_shape=True)
        tensorarray_fp = tf.TensorArray(tf.bool, size=total_size, dynamic_size=False,
                                        infer_shape=True)
        # Loop over detections; parallel_iterations=1 below keeps the greedy
        # matching order deterministic.
        def condition(i, ta_tp, ta_fp, matching):
            r = tf.less(i, total_size)
            return r
        def body(i, ta_tp, ta_fp, matching_gt):
            # Jaccard score with gt bboxes, zeroed for other-class groundtruth.
            bbox = bboxes[i]
            jaccard = bboxes_jaccard(bbox, bboxes_gt)
            jaccard = jaccard * tf.cast(tf.equal(label, labels_gt), jaccard.dtype)
            max_idx = tf.cast(tf.argmax(jaccard, axis=0), tf.int32)
            max_jaccard = jaccard[max_idx]
            match = max_jaccard > matching_threshold
            is_exist = matching_gt[max_idx]
            not_difficult = tf.logical_not(difficults_gt[max_idx])
            # TP: first sufficient match of a non-difficult groundtruth box.
            tp = tf.logical_and(not_difficult,
                                tf.logical_and(match, tf.logical_not(is_exist)))
            ta_tp = ta_tp.write(i, tp)
            # FP: no sufficient match, or the groundtruth was already claimed.
            fp = tf.logical_and(not_difficult,
                                tf.logical_or(tf.logical_not(match), is_exist))
            ta_fp = ta_fp.write(i, fp)
            # Mark the matched groundtruth box as claimed.
            mask = tf.logical_and(tf.equal(range_gt, max_idx),
                                  tf.logical_and(not_difficult, match))
            matching_gt = tf.logical_or(matching_gt, mask)
            return [i+1, ta_tp, ta_fp, matching_gt]
        i = 0
        [i, tensorarray_tp, tensorarray_fp, matching_gt] = tf.while_loop(
            condition, body, [i, tensorarray_tp, tensorarray_fp, matching_gt],
            parallel_iterations=1, back_prop=False
        )
        tp_match = tf.reshape(tensorarray_tp.stack(), tf.shape(scores))
        fp_match = tf.reshape(tensorarray_fp.stack(), tf.shape(scores))
        return num_bboxes_gt, tp_match, fp_match
def bboxes_matching_batch(labels, scores, bboxes,
                          labels_gt, bboxes_gt, difficults_gt,
                          matching_threshold=0.5, scope=None):
    """Matching a collection of detected boxes with groundtruth values, batched-inputs.

    Runs bboxes_matching per class over the batch dimension via tf.map_fn.

    Returns:
        Three dictionaries keyed by class: groundtruth counts, true-positive
        masks and false-positive masks.
    """
    with tf.name_scope(scope, 'bboxes_matching_batch', [scores, bboxes, labels_gt,
                                                        bboxes_gt, difficults_gt]):
        dict_num_bboxes_gt = {}
        dict_tp = {}
        dict_fp = {}
        for label in labels:
            # map_fn unpacks one batch element per call; the lambda closes over
            # the current `label` (safe: map_fn is invoked inside the loop body).
            n, tp, fp = tf.map_fn(
                lambda x: bboxes_matching(label, x[0], x[1], x[2], x[3], x[4], matching_threshold),
                (scores[label], bboxes[label], labels_gt, bboxes_gt, difficults_gt),
                dtype=(tf.int64, tf.bool, tf.bool),
                parallel_iterations=10,
                back_prop=False,
                swap_memory=True,
                infer_shape=True,
            )
            dict_num_bboxes_gt[label] = n
            dict_tp[label] = tp
            dict_fp[label] = fp
        return dict_num_bboxes_gt, dict_tp, dict_fp
| {"/datasets/pascal2012.py": ["/utils/dataset_util.py"], "/datasets/voc_to_tfrecords.py": ["/utils/dataset_util.py", "/datasets/pascal2012.py"], "/eval_ssd_network.py": ["/utils/bbox_util.py"]} |
62,157 | silenove/ssd_vgg-Tensorflow | refs/heads/master | /nets/ssd_vgg_300.py | """300 VGG-based SSD model.
This model was initially introduced in:
SSD: Single Shot MultiBox Detector
Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed,
Cheng-Yang Fu, Alexander C. Berg
https://arxiv.org/abs/1512.02325
@@ssd_vgg_300
"""
import numpy as np
import tensorflow as tf
import math
from collections import namedtuple
from utils import network_util, bbox_util
slim = tf.contrib.slim
# Hyper-parameter bundle for the SSD network: input/image sizes, the feature
# layers used for prediction, anchor configuration and prior-box variances.
SSD_params = namedtuple('SSD_params', ['image_size', 'batch_size', 'num_classes', 'featmap_layers',
                                       'featmap_size', 'num_anchors', 'anchor_steps', 'anchor_offset',
                                       'S_min', 'S_max', 'box_scales', 'anchor_ratios',
                                       'prior_variance'])
class SSDNet(object):
    """300x300 VGG-based SSD detector.

    Bundles the network definition, default anchor generation, groundtruth
    encoding / prediction decoding, detection post-processing and the
    classification + localization training losses.
    """
    def __init__(self, ssd_params=None):
        # Default to the canonical SSD-300 / 21-class (VOC) configuration
        # when no parameter bundle is supplied.
        if ssd_params is None:
            self.ssd_params = SSD_params(image_size=(300, 300),
                                         batch_size=4,
                                         num_classes=21,
                                         featmap_layers=['conv4', 'conv7', 'conv8',
                                                         'conv9', 'conv10', 'conv11'],
                                         featmap_size=[(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)], # (h, w)
                                         num_anchors=[4, 6, 6, 6, 4, 4],
                                         anchor_steps=[8, 16, 32, 64, 100, 300],
                                         anchor_offset=0.5,
                                         S_min=0.15,
                                         S_max=0.9,
                                         box_scales=[],
                                         anchor_ratios=[[2, 1 / 2],
                                                        [2, 3, 1 / 2, 1 / 3],
                                                        [2, 3, 1 / 2, 1 / 3],
                                                        [2, 3, 1 / 2, 1 / 3],
                                                        [2, 1 / 2],
                                                        [2, 1 / 2]],
                                         prior_variance=[0.1, 0.1, 0.2, 0.2])
        else:
            self.ssd_params = ssd_params
        # Fills ssd_params.box_scales in place (mutable list inside namedtuple).
        self._compute_box_scales()
    def set_batch_size(self, batch_size):
        # namedtuple is immutable; _replace builds a new bundle with the new size.
        self.ssd_params = self.ssd_params._replace(batch_size=batch_size)
    def _ssd_vgg_300_base_network(self, inputs, reuse=None, scope=None):
        """Define the base nets of 300 VGG-based SSD.

        input image : batch_size x 300 x 300 x channels.
        convolution layers default stride = 1, padding = 'SAME'
        maxpool layers default stride = 2, padding = 'VALID'

        Returns (class logits, box regressions, end_points dict); logits are
        batch x num_anchors_total x num_classes, locs batch x num_anchors_total x 4.
        """
        end_points = {}
        with tf.variable_scope(scope, 'ssd_vgg_300', [inputs], reuse=reuse):
            # Original VGG-16 nets
            # input: batch_size x 300 x 300 x channels
            net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
            end_points['conv1'] = net
            net = slim.max_pool2d(net, [2, 2], stride=2, scope='pool1')
            # tensor: batch_size x 150 x 150 x 64
            net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
            end_points['conv2'] = net
            net = slim.max_pool2d(net, [2, 2], stride=2, scope='pool2')
            # tensor: batch_size x 75 x 75 x 128
            net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
            end_points['conv3'] = net
            net = slim.max_pool2d(net, [2, 2], stride=2, padding='SAME', scope='pool3')
            # tensor: batch_size x 38 x 38 x 256
            net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
            end_points['conv4'] = net
            net = slim.max_pool2d(net, [2, 2], stride=1, padding='SAME', scope='pool4')
            # tensor: batch_size x 38 x 38 x 512
            net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
            end_points['conv5'] = net
            net = slim.max_pool2d(net, [2, 2], stride=1, padding='SAME', scope='pool5')
            # SSD nets
            # tensor: batch_size x 38 x 38 x 512
            net = slim.conv2d(net, 1024, [3, 3], rate=6, scope='conv6')
            end_points['conv6'] = net
            # tensor: batch_size x 19 x 19 x 1024
            # NOTE(review): a 1x1 conv with stride=2 halves the spatial size; the
            # size comments around conv6/conv7 look shifted by one layer — confirm
            # conv7 really comes out 19x19 as featmap_size expects.
            net = slim.conv2d(net, 1024, [1, 1], stride=2, scope='conv7')
            end_points['conv7'] = net
            net = slim.max_pool2d(net, [2, 2], stride=1, padding='SAME', scope='pool7')
            # tensor: batch_size 19 x 19 x 1024
            net = slim.conv2d(net, 256, [1, 1], scope='conv8_1x1')
            net = slim.conv2d(net, 512, [3, 3], stride=2, scope='conv8_3x3')
            end_points['conv8'] = net
            # tensor: batch_size x 10 x 10 x 512
            net = slim.conv2d(net, 128, [1, 1], scope='conv9_1x1')
            net = slim.conv2d(net, 256, [3, 3], stride=2, scope='conv9_3x3')
            end_points['conv9'] = net
            # tensor: batch_size x 5 x 5 x 256
            net = slim.conv2d(net, 128, [1, 1], scope='conv10_1x1')
            net = slim.conv2d(net, 256, [3, 3], stride=2, scope='conv10_3x3')
            end_points['conv10'] = net
            # tensor: batch_size x 3 x 3 x 256
            net = slim.conv2d(net, 128, [1, 1], scope='conv11_1x1')
            net = slim.conv2d(net, 256, [3, 3], stride=1, padding='VALID', scope='conv11_3x3')
            end_points['conv11'] = net
            # tensor: batch_size x 1 x 1 x 256
            # Per-feature-layer prediction heads: one conv for class confidences,
            # one for box offsets, each flattened per batch element.
            for i, layer in enumerate(self.ssd_params.featmap_layers):
                if i == 0:
                    # conv4 layer: batch-normalize first (stands in for the L2
                    # normalization the SSD paper applies to conv4_3).
                    with tf.variable_scope(layer+'_mbox'):
                        # classes
                        norm = slim.batch_norm(end_points[layer], decay=0.9997, epsilon=0.000001,
                                               scope=layer + '_norm')
                        norm_mbox_conf_perm = slim.conv2d(norm,
                                                          self.ssd_params.num_classes * self.ssd_params.num_anchors[i],
                                                          [3, 3],
                                                          scope=layer + '_norm_mbox_conf_perm')
                        norm_mbox_conf_flat = tf.contrib.layers.flatten(norm_mbox_conf_perm,
                                                                        scope=layer + '_norm_mbox_conf_flat')
                        end_points[layer + '_mbox_conf_flat'] = norm_mbox_conf_flat
                        # bounding box
                        norm_mbox_loc_perm = slim.conv2d(norm,
                                                         self.ssd_params.num_anchors[i] * 4,
                                                         [3, 3],
                                                         scope=layer + '_norm_mbox_loc_perm')
                        norm_mbox_loc_flat = tf.contrib.layers.flatten(norm_mbox_loc_perm,
                                                                       scope=layer + '_norm_mbox_loc_flat')
                        end_points[layer + '_mbox_loc_flat'] = norm_mbox_loc_flat
                else:
                    # conv7, conv8, conv9, conv10, conv11
                    with tf.variable_scope(layer+'_mbox'):
                        # classes
                        mbox_conf_perm = slim.conv2d(end_points[layer],
                                                     self.ssd_params.num_classes * self.ssd_params.num_anchors[i],
                                                     [3, 3],
                                                     scope=layer + '_mbox_conf_perm')
                        mbox_conf_flat = tf.contrib.layers.flatten(mbox_conf_perm,
                                                                   scope=layer + '_mbox_conf_flat')
                        end_points[layer + '_mbox_conf_flat'] = mbox_conf_flat
                        # bounding box
                        mbox_loc_perm = slim.conv2d(end_points[layer],
                                                    self.ssd_params.num_anchors[i] * 4,
                                                    [3, 3],
                                                    scope=layer + '_mbox_loc_perm')
                        mbox_loc_flat = tf.contrib.layers.flatten(mbox_loc_perm,
                                                                  scope=layer + '_mbox_loc_flat')
                        end_points[layer + '_mbox_loc_flat'] = mbox_loc_flat
        # concatenate and reshape all per-layer predictions into
        # (batch, num_anchors_total, num_classes) and (batch, num_anchors_total, 4)
        mbox_conf = tf.concat([end_points[layer + '_mbox_conf_flat'] for layer in self.ssd_params.featmap_layers],
                              axis=-1)
        mbox_conf_reshape = tf.reshape(mbox_conf, [self.ssd_params.batch_size, -1, self.ssd_params.num_classes])
        end_points['mbox_conf_reshape'] = mbox_conf_reshape
        mbox_loc = tf.concat([end_points[layer + '_mbox_loc_flat'] for layer in self.ssd_params.featmap_layers],
                             axis=-1)
        mbox_loc_reshape = tf.reshape(mbox_loc, [self.ssd_params.batch_size, -1, 4])
        end_points['mbox_loc_reshape'] = mbox_loc_reshape
        return end_points['mbox_conf_reshape'], end_points['mbox_loc_reshape'], end_points
    def ssd_vgg_300_net(self, inputs, is_training=True, reuse=None, scope='ssd_vgg_300'):
        """Creates the 300 VGG-based SSD model."""
        # NOTE(review): this initial assignment is dead — it is overwritten below.
        end_points = {}
        # is_training toggles batch-norm's update/inference behavior.
        with slim.arg_scope([slim.batch_norm], is_training=is_training):
            logits, locs, end_points = self._ssd_vgg_300_base_network(inputs, reuse, scope=scope)
        return logits, locs, end_points
    def ssd_arg_scope(self,
                      activation_fn=tf.nn.relu,
                      weights_initializer=tf.contrib.layers.xavier_initializer(),
                      biases_initializer=tf.zeros_initializer(),
                      weight_decay=0.00004):
        """Define ssd arg scope: shared conv2d activation, init and L2 regularization."""
        with slim.arg_scope([slim.conv2d], activation_fn=activation_fn,
                            weights_initializer=weights_initializer,
                            biases_initializer=biases_initializer,
                            weights_regularizer=slim.l2_regularizer(weight_decay),
                            biases_regularizer=slim.l2_regularizer(weight_decay)) as sc:
            return sc
    def _compute_box_scales(self):
        """Compute the scales of the default boxes for each feature map.

        First layer gets S_min, last gets S_max, intermediate layers are
        linearly interpolated (cf. the SSD paper's scale formula).
        """
        num_layers = len(self.ssd_params.featmap_layers)
        for k in range(num_layers):
            if k == 0:
                self.ssd_params.box_scales.append(self.ssd_params.S_min)
            elif k == num_layers - 1:
                self.ssd_params.box_scales.append(self.ssd_params.S_max)
            else:
                # NOTE(review): with 0-based k, (k - 1) makes layer k=1 repeat
                # S_min and leaves a large jump before S_max — the SSD paper's
                # interpolation would use k here. Confirm whether this is intended.
                s_k = self.ssd_params.S_min + (self.ssd_params.S_max - self.ssd_params.S_min) / \
                      (num_layers - 1) * (k - 1)
                self.ssd_params.box_scales.append(s_k)
    def _anchor_for_one_layer(self, layer_idx, dtype=np.float32):
        """Compute the relative coordinate of the SSD default anchors for a one feature layer.

        Arguments:
            layer_idx: the index of feature layer.
        Return:
            y, x, w, h: the anchors coordinate.
        """
        # Anchor centers: one (y, x) per feature-map cell, offset to the cell
        # center and normalized by the input image size.
        y, x = np.mgrid[0:self.ssd_params.featmap_size[layer_idx][0],
                        0:self.ssd_params.featmap_size[layer_idx][1]]
        y = y.astype(dtype)
        x = x.astype(dtype)
        y = ((y + self.ssd_params.anchor_offset) * self.ssd_params.anchor_steps[layer_idx]) / \
            self.ssd_params.image_size[0]
        x = ((x + self.ssd_params.anchor_offset) * self.ssd_params.anchor_steps[layer_idx]) / \
            self.ssd_params.image_size[1]
        # Change the shape to h x w x num_anchors.
        y = np.expand_dims(y, axis=-1)
        y = np.concatenate([y for _ in range(self.ssd_params.num_anchors[layer_idx])], axis=-1)
        x = np.expand_dims(x, axis=-1)
        x = np.concatenate([x for _ in range(self.ssd_params.num_anchors[layer_idx])], axis=-1)
        # Anchor sizes: first a square box at this layer's scale, last an extra
        # square box at the geometric-mean scale, the rest from anchor_ratios.
        h = np.zeros([self.ssd_params.num_anchors[layer_idx]], dtype=dtype)
        w = np.zeros([self.ssd_params.num_anchors[layer_idx]], dtype=dtype)
        for i in range(self.ssd_params.num_anchors[layer_idx]):
            if i == 0:
                h[i] = self.ssd_params.box_scales[layer_idx]
                w[i] = self.ssd_params.box_scales[layer_idx]
            elif i == self.ssd_params.num_anchors[layer_idx] - 1:
                if layer_idx < len(self.ssd_params.featmap_layers) - 1:
                    s = np.sqrt(self.ssd_params.box_scales[layer_idx] *
                                self.ssd_params.box_scales[layer_idx + 1])
                    h[i] = s
                    w[i] = s
                else:
                    # The last feature map.
                    s = np.sqrt(self.ssd_params.box_scales[layer_idx] *
                                (self.ssd_params.box_scales[layer_idx] + 1) / 2)
                    h[i] = s
                    w[i] = s
            else:
                # NOTE(review): the loop variable a_r is unused — every pass
                # rewrites h[i]/w[i] with the same anchor_ratios[i-1] values,
                # so this loop is redundant (but harmless). Also note it indexes
                # the top-level anchor_ratios list, not the per-layer sublist.
                for a_r in self.ssd_params.anchor_ratios:
                    h[i] = self.ssd_params.box_scales[layer_idx] * \
                           np.sqrt(self.ssd_params.anchor_ratios[i - 1][0])
                    w[i] = self.ssd_params.box_scales[layer_idx] / \
                           np.sqrt(self.ssd_params.anchor_ratios[i - 1][1])
        return y, x, h, w
    def anchors_for_all_layer(self):
        """Compute the relative coordinate of the SSD default anchors for all feature map."""
        anchors_cords = {}
        for i, layer in enumerate(self.ssd_params.featmap_layers):
            anchors_cords[layer] = self._anchor_for_one_layer(i)
        return anchors_cords
    def bboxes_encode(self, anchors, gt_labels, gt_bboxes,
                      match_threshold=0.5, dtype=tf.float32, scope=None):
        """Encode labels and bounding boxes into per-anchor training targets.

        Returns flat (num_anchors_total,) labels and scores plus flat
        localization targets, concatenated over all feature layers.
        """
        labels, scores, locs = network_util.encode_gtbboxes_from_all_layers(self.ssd_params,
                                                                            anchors,
                                                                            gt_labels,
                                                                            gt_bboxes,
                                                                            match_threshold,
                                                                            dtype,
                                                                            scope)
        # Flatten the per-layer outputs into single 1-D / 2-D tensors.
        labels = tf.concat([tf.reshape(x, [-1]) for x in labels], axis=0)
        scores = tf.concat([tf.reshape(x, [-1]) for x in scores], axis=0)
        locs = tf.concat([tf.reshape(x, [-1]) for x in locs], axis=0)
        return labels, scores, locs
    def bboxes_decode(self, locs_pred, anchors, scope='ssd_bboxes_decode'):
        """Decode predicted offsets back into boxes using the default anchors."""
        bboxes = []
        # Broadcast each layer's per-anchor (h, w) across its feature map and
        # flatten every layer to (-1, 4) anchor descriptors (y, x, h, w).
        for i, layer in enumerate(self.ssd_params.featmap_layers):
            y, x, h, w = anchors[layer]
            featmap_h = self.ssd_params.featmap_size[i][0]
            featmap_w = self.ssd_params.featmap_size[i][1]
            h = tf.reshape(tf.concat([h]*featmap_h*featmap_w, axis=-1),
                           [featmap_h, featmap_w, -1])
            w = tf.reshape(tf.concat([w]*featmap_h*featmap_w, axis=-1),
                           [featmap_h, featmap_w, -1])
            bboxes.append(tf.reshape(tf.stack([y,x,h,w], axis=-1), [-1, 4]))
        bboxes = tf.concat(bboxes, axis=0)
        # Tile the anchor set across the batch dimension.
        bboxes_batches = tf.concat([tf.expand_dims(bboxes, axis=0)]*self.ssd_params.batch_size, axis=0)
        anchors = [bboxes_batches[:, :, 0], bboxes_batches[:, :, 1],
                   bboxes_batches[:, :, 2], bboxes_batches[:, :, 3]]
        return network_util.decode_bboxes_from_all_layer(locs_pred,
                                                         anchors,
                                                         prior_scaling=self.ssd_params.prior_variance,
                                                         scope=scope)
    def detected_bboxes(self, predictions, localizations, select_threshold=0.01,
                        nms_threshold=0.5, top_k=400, keep_top_k=200):
        """Get the detected bounding boxes from SSD Model output.

        Pipeline: score-threshold selection -> per-class top_k sort ->
        per-class non-maximum suppression keeping keep_top_k boxes.
        """
        scores_select, bboxes_select = network_util.select_detected_bboxes_all_classes(predictions,
                                                                                      localizations,
                                                                                      self.ssd_params.num_classes,
                                                                                      select_threshold)
        dict_scores_sorted, dict_bboxes_sorted = bbox_util.bboxes_sort_all_classes(scores_select,
                                                                                  bboxes_select,
                                                                                  top_k)
        dict_scores_nms, dict_bboxes_nms = bbox_util.bboxes_nms_all_classes(dict_scores_sorted,
                                                                            dict_bboxes_sorted,
                                                                            self.ssd_params.batch_size,
                                                                            nms_threshold,
                                                                            keep_top_k)
        return dict_scores_nms, dict_bboxes_nms
    def ssd_class_and_loc_losses(self,
                                 logits_pred,
                                 localization_pred,
                                 classes_gt,
                                 localization_gt,
                                 scores_gt,
                                 match_threshold=0.5,
                                 negative_ratio=3.0,
                                 alpha=1.0,
                                 label_smoothing=0.0,
                                 scope=None):
        """Compute the SSD nets losses including classification and localization.

        Adds three losses to tf.losses: positive cross-entropy, hard-mined
        negative cross-entropy, and smooth-L1 localization (positives only,
        weighted by alpha). Nothing is returned.

        Arguments:
            logits_pred: SSD nets output, batch_size x -1 x 21.
            localization_pred: SSD nets output, batch_size x -1 x 4.
            classes_gt: gt classes per anchor; scores_gt are the anchor/gt
                overlap scores used to decide positives.
        """
        with tf.name_scope(scope, 'ssd_losses'):
            # Reshape all tensors to flat per-anchor form.
            logits_pred_flat = tf.reshape(logits_pred, [-1, self.ssd_params.num_classes])
            localization_pred_flat = tf.reshape(localization_pred, [-1, 4])
            classes_gt_flat = tf.reshape(classes_gt, [-1])
            localization_gt_flat = tf.reshape(localization_gt, [-1, 4])
            scores_gt_flat = tf.reshape(scores_gt, [-1])
            dtype = logits_pred_flat.dtype
            # Compute positive matching mask (anchors overlapping gt enough).
            posi_mask = scores_gt_flat > match_threshold
            f_posi_mask = tf.cast(posi_mask, dtype)
            n_positive = tf.reduce_sum(f_posi_mask)
            # Hard negative mining: rank negatives by background confidence
            # (low background prob = hard) and keep negative_ratio x positives.
            neg_mask = tf.logical_not(posi_mask)
            f_neg_mask = tf.cast(neg_mask, dtype)
            logits_pred_softmax = slim.softmax(logits_pred_flat)
            neg_values = tf.where(neg_mask, logits_pred_softmax[:, 0], 1. - f_neg_mask)
            # Number of negative entries to select.
            max_neg_entries = tf.cast(tf.reduce_sum(f_neg_mask), tf.int32)
            n_neg = tf.cast(negative_ratio * n_positive, tf.int32)
            n_neg = tf.minimum(n_neg, max_neg_entries)
            vals, idxes = tf.nn.top_k(-1 * neg_values, k=n_neg)
            max_hard_pred = -1 * vals[-1]
            # Final negative mask: only negatives harder than the cutoff.
            neg_mask = tf.logical_and(neg_mask, neg_values < max_hard_pred)
            f_neg_mask = tf.cast(neg_mask, dtype)
            with tf.name_scope('cross_entropy'):
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_pred_flat,
                                                                      labels=classes_gt_flat)
                loss_posi = tf.div(tf.reduce_sum(loss * f_posi_mask), tf.reduce_sum(f_posi_mask),
                                   name='positive_loss')
                loss_neg = tf.div(tf.reduce_sum(loss * f_neg_mask), tf.reduce_sum(f_neg_mask),
                                  name='negative_loss')
                tf.losses.add_loss(loss_posi)
                tf.losses.add_loss(loss_neg)
            with tf.name_scope('localization_loss'):
                # Smooth-L1 on box offsets, restricted to positive anchors.
                loss = network_util.abs_smooth_L1(localization_pred_flat - localization_gt_flat)
                loss_loc = tf.div(tf.reduce_sum(loss * tf.expand_dims(f_posi_mask, axis=-1)) * alpha,
                                  tf.reduce_sum(f_posi_mask),
                                  name='localization_loss')
                tf.losses.add_loss(loss_loc)
| {"/datasets/pascal2012.py": ["/utils/dataset_util.py"], "/datasets/voc_to_tfrecords.py": ["/utils/dataset_util.py", "/datasets/pascal2012.py"], "/eval_ssd_network.py": ["/utils/bbox_util.py"]} |
62,158 | silenove/ssd_vgg-Tensorflow | refs/heads/master | /eval_ssd_network.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generic evaluation script that evaluates a model using a given dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
from datasets import dataset_factory
from nets import nets_factory, ssd_vgg_300
from preprocessing import preprocessing_factory
from utils.bbox_util import bboxes_matching_batch
from utils.metrics import *
slim = tf.contrib.slim
# ---------------------------------------------------------------------------
# Command-line flags for evaluation: data pipeline, checkpoint locations,
# model/preprocessing selection and detection post-processing thresholds.
# ---------------------------------------------------------------------------
tf.app.flags.DEFINE_integer(
    'batch_size', 1, 'The number of samples in each batch.')
tf.app.flags.DEFINE_integer(
    'max_num_batches', None,
    'Max number of batches to evaluate by default use all.')
tf.app.flags.DEFINE_string(
    'master', '', 'The address of the TensorFlow master to use.')
tf.app.flags.DEFINE_string(
    'checkpoint_path', '/tmp/tfmodel/',
    'The directory where the model was written to or an absolute path to a '
    'checkpoint file.')
tf.app.flags.DEFINE_string(
    'eval_dir', '/tmp/tfmodel/', 'Directory where the results are saved to.')
tf.app.flags.DEFINE_integer(
    'num_preprocessing_threads', 4,
    'The number of threads used to create the batches.')
tf.app.flags.DEFINE_string(
    'dataset_name', 'pascal2012', 'The name of the dataset to load.')
tf.app.flags.DEFINE_string(
    'dataset_split_name', 'validation', 'The name of the train/test split.')
tf.app.flags.DEFINE_string(
    'dataset_dir', None, 'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_integer(
    'labels_offset', 0,
    'An offset for the labels in the dataset. This flag is primarily used to '
    'evaluate the VGG and ResNet architectures which do not use a background '
    'class for the ImageNet dataset.')
tf.app.flags.DEFINE_string(
    'model_name', 'inception_v3', 'The name of the architecture to evaluate.')
tf.app.flags.DEFINE_string(
    'preprocessing_name', 'ssd_vgg_300', 'The name of the preprocessing to use. If left '
    'as `None`, then the model_name flag is used.')
tf.app.flags.DEFINE_float(
    'moving_average_decay', None,
    'The decay to use for the moving average.'
    'If left as None, then moving averages are not used.')
tf.app.flags.DEFINE_integer(
    'eval_image_size_height', None, 'Eval image height in pixel.')
tf.app.flags.DEFINE_integer(
    'eval_image_size_width', None, 'Eval image width in pixel.')
tf.app.flags.DEFINE_float(
    'select_threshold', 0.01, 'selection threshold.'
)
tf.app.flags.DEFINE_float(
    'nms_threshold', 0.5, 'Non-Maximum selection threshold.'
)
tf.app.flags.DEFINE_integer(
    'select_top_k', 400, 'select top k bboxes per class.'
)
tf.app.flags.DEFINE_integer(
    'keep_top_k', 200, 'Non-Maximum selection keep top k bboxes per class.'
)
tf.app.flags.DEFINE_boolean(
    'remove_difficult', True, 'Remove difficult objects from evaluation.'
)
tf.app.flags.DEFINE_float(
    'matching_threshold', 0.5, 'bboxes matching threshold.'
)
tf.app.flags.DEFINE_float(
    'gpu_memory_fraction', 0.8, 'GPU memory using fraction.'
)
FLAGS = tf.app.flags.FLAGS
# Channel ordering for all image tensors during evaluation.
DATA_FORMAT = 'NHWC'
def main(_):
    """Run a single evaluation pass of an SSD detection model.

    Builds the evaluation graph -- dataset input pipeline, SSD network,
    bounding-box decoding + NMS, and VOC12 precision/recall/mAP metrics --
    then evaluates the latest checkpoint once with
    ``slim.evaluation.evaluate_once``.

    Args:
        _: unused positional argument supplied by ``tf.app.run()``.

    Raises:
        ValueError: if ``--dataset_dir`` is not supplied.
    """
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default():
        tf_global_step = slim.get_or_create_global_step()
        ######################
        # Select the dataset #
        ######################
        dataset = dataset_factory.get_dataset(
            FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
        ####################
        # Select the model #
        ####################
        ssd_model = ssd_vgg_300.SSDNet()
        ssd_model.set_batch_size(FLAGS.batch_size)
        network_fn = nets_factory.get_network_fn(
            ssd_model, is_training=False)
        ##############################################################
        # Create a dataset provider that loads data from the dataset #
        ##############################################################
        provider = slim.dataset_data_provider.DatasetDataProvider(
            dataset,
            common_queue_capacity=20 * FLAGS.batch_size,
            common_queue_min=10 * FLAGS.batch_size)
        [image, labels, bboxes] = provider.get(['image', 'object/label', 'object/bbox'])
        labels -= FLAGS.labels_offset
        if FLAGS.remove_difficult:
            # BUGFIX: provider.get() returns a *list* of tensors; unpack the
            # single tensor so both branches yield a tensor (the original
            # code assigned the list itself, inconsistent with the tf.zeros
            # tensor produced below).
            [difficults_gt] = provider.get(['object/difficult'])
        else:
            # No difficult annotations requested: treat every object as
            # non-difficult.
            difficults_gt = tf.zeros(tf.shape(labels), dtype=tf.int64)
        #####################################
        # Select the preprocessing function #
        #####################################
        preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
        image_preprocessing_fn = preprocessing_factory.get_preprocessing(preprocessing_name)
        # Fall back to the SSD model's native input resolution when the eval
        # size flags are unset.
        eval_image_size_height = FLAGS.eval_image_size_height or ssd_model.ssd_params.image_size[0]
        eval_image_size_width = FLAGS.eval_image_size_width or ssd_model.ssd_params.image_size[1]
        image, labels_gt, bboxes_gt = image_preprocessing_fn(image, labels, bboxes,
                                                             eval_image_size_height, eval_image_size_width,
                                                             data_format=DATA_FORMAT,
                                                             is_training=False)
        # Encode ground truth against the SSD anchor grid (targets for the
        # loss computation below).
        anchors = ssd_model.anchors_for_all_layer()
        labels_en, scores_en, bboxes_en = ssd_model.bboxes_encode(anchors, labels_gt, bboxes_gt)
        images, labels_gt, bboxes_gt, difficults_gt, labels_en, scores_en, bboxes_en = \
            tf.train.batch(
                [image, labels_gt, bboxes_gt, difficults_gt, labels_en, scores_en, bboxes_en],
                batch_size=FLAGS.batch_size,
                num_threads=FLAGS.num_preprocessing_threads,
                capacity=5 * FLAGS.batch_size,
                dynamic_pad=True)
        ################################
        # SSD Model + outputs decoding #
        ################################
        logits, locs, endpoints = network_fn(images)
        # Adds the classification/localization losses to tf.GraphKeys.LOSSES,
        # which are picked up by the metrics loop below.
        ssd_model.ssd_class_and_loc_losses(logits, locs, labels_en, bboxes_en, scores_en)
        # Performing post_processing on CPU: loop-intensive, usually more efficient.
        with tf.device('/device:CPU:0'):
            # Detect objects from SSD Model outputs
            locs_aggr = ssd_model.bboxes_decode(locs, anchors)
            scores_nms, bboxes_nms = ssd_model.detected_bboxes(logits,
                                                               locs_aggr,
                                                               FLAGS.select_threshold,
                                                               FLAGS.nms_threshold,
                                                               FLAGS.select_top_k,
                                                               FLAGS.keep_top_k)
            # Match detections against ground truth per class (TP/FP counts).
            num_bboxes_gt, tp, fp = bboxes_matching_batch(scores_nms.keys(), scores_nms,
                                                          bboxes_nms, labels_gt, bboxes_gt,
                                                          difficults_gt,
                                                          matching_threshold=FLAGS.matching_threshold)
        # Restore either moving-average shadow variables or the raw model
        # variables, depending on --moving_average_decay.
        if FLAGS.moving_average_decay:
            variable_averages = tf.train.ExponentialMovingAverage(
                FLAGS.moving_average_decay, tf_global_step)
            variables_to_restore = variable_averages.variables_to_restore(
                slim.get_model_variables())
            variables_to_restore[tf_global_step.op.name] = tf_global_step
        else:
            variables_to_restore = slim.get_variables_to_restore()
        # Define the metrics:
        with tf.device('/device:CPU:0'):
            dict_metrics = {}
            # First add all losses.
            for loss in tf.get_collection(tf.GraphKeys.LOSSES):
                dict_metrics[loss.op.name] = slim.metrics.streaming_mean(loss)
            # Extra losses as well.
            for loss in tf.get_collection('EXTRA_LOSSES'):
                dict_metrics[loss.op.name] = slim.metrics.streaming_mean(loss)
            # Add metrics to summaries and Print on screen.
            for name, metric in dict_metrics.items():
                summary_name = name
                op = tf.summary.scalar(summary_name, metric[0], collections=[])
                tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)
            # FP and TP metrics.
            tp_fp_metric = streaming_tp_fp_arrays(num_bboxes_gt, tp, fp, scores_nms)
            for c in tp_fp_metric[0].keys():
                dict_metrics['tp_fp_%s' % c] = (tp_fp_metric[0][c],
                                                tp_fp_metric[1][c])
            # Add to summaries precision/recall values.
            aps_voc12 = {}
            for c in tp_fp_metric[0].keys():
                # Precison and recall values.
                prec, rec = precision_recall(*tp_fp_metric[0][c])
                # Average precision VOC12.
                v = average_precision_voc12(prec, rec)
                summary_name = 'AP_VOC12/%s' % c
                op = tf.summary.scalar(summary_name, v, collections=[])
                tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)
                aps_voc12[c] = v
            # Mean average precision VOC12.
            summary_name = 'AP_VOC12/mAP'
            mAP = tf.add_n(list(aps_voc12.values())) / len(aps_voc12)
            op = tf.summary.scalar(summary_name, mAP, collections=[])
            op = tf.Print(op, [mAP], summary_name)
            tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)
        # Split into values and updates ops.
        names_to_values, names_to_updates = slim.metrics.aggregate_metric_map(dict_metrics)
        # Cap GPU memory usage for the evaluation session.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)
        config = tf.ConfigProto(log_device_placement=False, gpu_options=gpu_options)
        # TODO(sguada) use num_epochs=1
        if FLAGS.max_num_batches:
            num_batches = FLAGS.max_num_batches
        else:
            # This ensures that we make a single pass over all of the data.
            num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size))
        # Resolve a directory checkpoint_path to its latest checkpoint file.
        if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
            checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
        else:
            checkpoint_path = FLAGS.checkpoint_path
        tf.logging.info('Evaluating %s' % checkpoint_path)
        slim.evaluation.evaluate_once(
            master=FLAGS.master,
            checkpoint_path=checkpoint_path,
            logdir=FLAGS.eval_dir,
            num_evals=num_batches,
            eval_op=list(names_to_updates.values()),
            variables_to_restore=variables_to_restore,
            # BUGFIX: config was previously constructed but never used, so
            # --gpu_memory_fraction had no effect.
            session_config=config)
if __name__ == '__main__':
    # tf.app.run() parses the DEFINE_* flags above and then invokes main().
    tf.app.run()
| {"/datasets/pascal2012.py": ["/utils/dataset_util.py"], "/datasets/voc_to_tfrecords.py": ["/utils/dataset_util.py", "/datasets/pascal2012.py"], "/eval_ssd_network.py": ["/utils/bbox_util.py"]} |
62,161 | Bobbyt1500/3x3Number_Puzzle_Solver | refs/heads/master | /solver.py | """
Solver.py solves the puzzle using A* Search
"""
import copy
import time
class Node:
def __init__(self, board, parent, action, moves, accuracy):
self.board = board
self.parent = parent
self.action = action
self.moves = moves
self.accuracy = accuracy
class PriorityQueue():
def __init__(self):
self.list = []
def contains_board(self, board):
for node in self.list:
if node.board == board:
return True
return False
def add(self, node):
self.list.append(node)
def empty(self):
return len(self.list) < 1
def remove(self):
best_node = self.list[0]
for test_node in self.list:
if test_node.moves + test_node.accuracy < best_node.moves + best_node.accuracy:
best_node = test_node
self.list.remove(best_node)
return best_node
def find_solution(starting_board):
"""
Returns solution after applying A* Search
"""
queue = PriorityQueue()
node = Node(starting_board, None, None, 0, get_accuracy(starting_board))
queue.add(node)
explored = []
while True:
if queue.empty():
print("No solution")
return None
node = queue.remove()
explored.append(node.board)
potential_actions = get_actions(node.board)
for action in potential_actions:
new_board = apply_action(node.board, action)
if not queue.contains_board(new_board) and not new_board in explored:
if is_terminal(new_board):
new_node = Node(new_board, node, action, node.moves+1, get_accuracy(new_board))
return get_solution(new_node)
new_node = Node(new_board, node, action, node.moves+1, get_accuracy(new_board))
queue.add(new_node)
def apply_action(board, action):
"""
Returns the resulting board from an action on a board
"""
new_board = copy.deepcopy(board)
for i in range(3):
for j in range(3):
if board[i][j] == "0":
new_board[i][j] = action
if board[i][j] == action:
new_board[i][j] = "0"
return new_board
def get_accuracy(board):
"""
A Heuristic which returns a point value of a board based on the location of each number
Lower is better
"""
score = 18
columns = []
for i in range(3):
columns.append([])
for i in range(3):
for j in range(3):
columns[j].append(board[i][j])
for i in range(3):
for j in range(3):
number = (i*3) + j+1
if number == 9:
number = 0
if str(number) in columns[j]:
score -= 1
if str(number) in board[i]:
score -= 1
return score
def get_solution(final_node):
"""
Returns a list of actions made to get to the solution
"""
solution = []
node = final_node
while node.action:
solution.append(node.action)
node = node.parent
solution.reverse()
return solution
def get_actions(board):
"""
Returns all actions the AI can make
"""
actions = []
for i in range(3):
for j in range(3):
if board[i][j] == "0":
if i + 1 < 3:
actions.append(board[i+1][j])
if i - 1 > -1:
actions.append(board[i-1][j])
if j + 1 < 3:
actions.append(board[i][j+1])
if j - 1 > -1:
actions.append(board[i][j-1])
return actions
def is_terminal(board):
"""
Determines if the board is a winning board or not
"""
terminal_board = [['1', '2', '3'], ['4','5','6'],['7','8','0']]
if board == terminal_board:
return True
else:
return False
| {"/main.py": ["/solver.py"]} |
62,162 | Bobbyt1500/3x3Number_Puzzle_Solver | refs/heads/master | /main.py | """
Main.py gets the data from the user in an easy to use gui
"""
import tkinter as tk
import math
import solver
root = tk.Tk()
root.title("Sliding Number Puzzle Solver")
root.geometry("500x700")
grid_frames = []
grid_entries = []
root.resizable(0,0)
def main():
board = get_inputs()
if board:
solution = solver.find_solution(board)
formatted_solution = " -> ".join(solution)
create_solution_box(formatted_solution)
def clear_grid():
for entry in grid_entries:
entry.delete(0, 1000)
def create_solution_box(solution):
box = tk.Toplevel(bg="#4ca1c3")
box.title("Solution")
box.geometry("300x300")
solution_label = tk.Label(box,text=solution,fg="white",font="Helvetica 15 bold",bg="#4ca1c3",pady=10,wraplength=250)
solution_label.pack()
dismiss_button = tk.Button(box,text="Dismiss",relief="flat",bg="#5a4c67",activebackground="#540101",fg="white",activeforeground="white",cursor="hand1",bd=0,highlightthickness=0,width=12,height=2,font="Helvetica 10 bold",command=box.destroy)
dismiss_button.pack()
def get_inputs():
# Put inputs into a list
values = []
duplicate_check = []
for i in range(3):
values.append([])
a = 0
for entry in grid_entries:
value = entry.get()
if value == "":
value = "0"
values[a].append(value)
duplicate_check.append(value)
a+=1
if a == 3:
a = 0
# Check for duplicate values
for value in duplicate_check:
if duplicate_check.count(value) > 1:
return None
return values
def validate_function(input):
# Test if number is in the range of acceptable values
acceptable = [""]
for i in range(9):
acceptable.append(str(i))
if input in acceptable:
return True
else:
return False
validation = root.register(validate_function)
def setup_gui():
#Create widgets
board_area = tk.Frame(root, height = 500, width = 500, bg="#517796")
input_area = tk.Frame(root, height = 200, width = 500, bg="#4ca1c3")
reset_button = tk.Button(input_area,text="Clear Spaces",relief="flat",bg="#b72b3d",activebackground="#540101",fg="white",activeforeground="white",cursor="hand1",bd=0,highlightthickness=0,width=12,height=2,font="Helvetica 10 bold",command=clear_grid)
find_button = tk.Button(input_area,text="Find Solution",relief="flat",bg="#5a4c67",activebackground="#540101",fg="white",activeforeground="white",cursor="hand1",bd=0,highlightthickness=0,width=12,height=2,font="Helvetica 10 bold",command=main)
#Add each widget to its layout position
board_area.grid(column=0,row=0)
board_area.grid_propagate(False)
input_area.grid(column=0,row=1)
input_area.grid_propagate(False)
reset_button.place(relx=.5,rely=.3,anchor="center")
find_button.place(relx=.5,rely=.6,anchor="center")
setup_grid(board_area)
def setup_grid(board_area):
global grid_frames
global grid_entries
for i in grid_frames:
i.destroy()
grid_frames = []
grid_entries = []
for i in range(3):
for j in range(3):
frame = tk.Frame(board_area,height=500/3,width=500/3,bg="#517796",highlightbackground="white",highlightthickness=2,highlightcolor="white")
frame.grid(column=i,row=j)
grid_frames.append(frame)
entry = tk.Entry(frame,bg="#517796",font="Helvetica 50 bold",fg="white",width=2,highlightcolor="white",insertbackground="white",justify="center",validate="key",vcmd=(validation, '%P'))
entry.place(relx=.5,rely=.5,anchor="center")
grid_entries.append(entry)
setup_gui()
root.mainloop()
| {"/main.py": ["/solver.py"]} |
62,219 | ArvidJB/versioned-hdf5 | refs/heads/master | /versioned_hdf5/replay.py | from __future__ import annotations
from typing import List, Iterable, Union, Dict, Any
from h5py import (
VirtualLayout,
h5s,
HLObject,
Dataset,
Group,
File,
__version__ as h5py_version
)
from h5py._hl.vds import VDSmap
from h5py._hl.selections import select
from h5py.h5i import get_name
from ndindex import Slice, ChunkSize, Tuple
from ndindex.ndindex import NDIndex
import numpy as np
from copy import deepcopy
import posixpath as pp
from collections import defaultdict
from .versions import all_versions
from .wrappers import (InMemoryGroup, DatasetWrapper, InMemoryDataset,
InMemoryArrayDataset, InMemorySparseDataset, _groups)
from .api import VersionedHDF5File
from .backend import (create_base_dataset, write_dataset,
write_dataset_chunks, create_virtual_dataset,
initialize)
from .slicetools import spaceid_to_slice
from .hashtable import Hashtable
def recreate_dataset(f, name, newf, callback=None):
"""
Recreate dataset from all versions into `newf`
`newf` should be a versioned hdf5 file/group that is already initialized
(it may or may not be in the same physical file as f). Typically `newf`
should be `tmp_group(f)` (see :func:`tmp_group`).
`callback` should be a function with the signature
callback(dataset, version_name)
It will be called on every dataset in every version. It should return the
dataset to be used for the new version. The dataset and its containing
group should not be modified in-place. If a new copy of a dataset is to be
used, it should be one of the dataset classes in versioned_hdf5.wrappers,
and should placed in a temporary group, which you may delete after
`recreate_dataset()` is done. The callback may also return None, in which
case the dataset is deleted for the given version.
Note: this function is only for advanced usage. Typical use-cases should
use :func:`delete_version()` or :func:`modify_metadata()`.
"""
if isinstance(f, VersionedHDF5File):
f = f.f
raw_data = f['_version_data'][name]['raw_data']
dtype = raw_data.dtype
chunks = raw_data.chunks
compression = raw_data.compression
compression_opts = raw_data.compression_opts
fillvalue = raw_data.fillvalue
first = True
for version_name in all_versions(f):
if name in f['_version_data/versions'][version_name]:
group = InMemoryGroup(f['_version_data/versions'][version_name].id,
_committed=True)
dataset = group[name]
if callback:
dataset = callback(dataset, version_name)
if dataset is None:
continue
dtype = dataset.dtype
shape = dataset.shape
chunks = dataset.chunks
compression = dataset.compression
compression_opts = dataset.compression_opts
fillvalue = dataset.fillvalue
attrs = dataset.attrs
if first:
create_base_dataset(newf, name,
data=np.empty((0,)*len(dataset.shape),
dtype=dtype),
dtype=dtype,
chunks=chunks,
compression=compression,
compression_opts=compression_opts,
fillvalue=fillvalue)
first = False
# Read in all the chunks of the dataset (we can't assume the new
# hash table has the raw data in the same locations, even if the
# data is unchanged).
if isinstance(dataset, (InMemoryDataset, InMemorySparseDataset)):
for c, index in dataset.data_dict.copy().items():
if isinstance(index, Slice):
dataset[c.raw]
assert not isinstance(dataset.data_dict[c], Slice)
slices = write_dataset_chunks(newf, name, dataset.data_dict)
else:
slices = write_dataset(newf, name, dataset)
create_virtual_dataset(newf, version_name, name, shape, slices,
attrs=attrs, fillvalue=fillvalue)
def tmp_group(f):
"""
Create a temporary group in `f` for use with :func:`recreate_dataset`.
"""
if isinstance(f, VersionedHDF5File):
f = f.f
if '__tmp__' not in f['_version_data']:
tmp = f['_version_data'].create_group('__tmp__')
initialize(tmp)
for version_name in all_versions(f):
group = f['_version_data/versions'][version_name]
new_group = tmp['_version_data/versions'].create_group(version_name)
for k, v in group.attrs.items():
new_group.attrs[k] = v
else:
tmp = f['_version_data/__tmp__']
return tmp
# See InMemoryDataset.fillvalue. In h5py3 variable length strings use None
# for the h5py fillvalue, but require a string fillvalue for NumPy.
def _get_np_fillvalue(data: Dataset) -> Any:
"""Get the fillvalue for an empty dataset.
See InMemoryDataset.fillvalue. In h5py3 variable length strings use None
for the h5py fillvalue, but require a string fillvalue for NumPy.
Parameters
----------
data : Dataset
Data for which the fillvalue is to be retrieved
Returns
-------
Any
Value used to fill the empty dataset; can be any numpy scalar type supported by
h5py
"""
if data.fillvalue is not None:
return data.fillvalue
if data.dtype.metadata:
if 'vlen' in data.dtype.metadata:
if (h5py_version.startswith('3') and
data.dtype.metadata['vlen'] == str):
return bytes()
return data.dtype.metadata['vlen']()
elif 'h5py_encoding' in data.dtype.metadata:
return data.dtype.type()
return np.zeros((), dtype=data.dtype)[()]
def _recreate_raw_data(
f: VersionedHDF5File,
name: str,
versions_to_delete: Iterable[str],
tmp: bool = False
) -> Dict[NDIndex, NDIndex]:
"""
Return a new raw data set for a dataset without the chunks from
versions_to_delete.
If no chunks would be left, i.e., the dataset does not appear in any
version not in versions_to_delete, None is returned.
If tmp is True, the new raw dataset is called '_tmp_raw_data' and is
placed alongside the existing raw dataset. Otherwise the existing raw
dataset is replaced.
"""
chunks_map = defaultdict(dict)
for version_name in all_versions(f):
if (version_name in versions_to_delete
or name not in f['_version_data/versions'][version_name]):
continue
dataset = f['_version_data/versions'][version_name][name]
if dataset.is_virtual:
virtual_sources = dataset.virtual_sources()
slice_map = {spaceid_to_slice(i.vspace):
spaceid_to_slice(i.src_space) for i in
virtual_sources}
else:
slice_map = {}
chunks_map[version_name].update(slice_map)
chunks_to_keep = set().union(*[map.values() for map in
chunks_map.values()])
chunks_to_keep = sorted(chunks_to_keep, key=lambda i: i.args[0].args[0])
raw_data = f['_version_data'][name]['raw_data']
chunks = ChunkSize(raw_data.chunks)
new_shape = (len(chunks_to_keep)*chunks[0], *chunks[1:])
new_raw_data = f['_version_data'][name].create_dataset(
'_tmp_raw_data', shape=new_shape, maxshape=(None,)+chunks[1:],
chunks=raw_data.chunks, dtype=raw_data.dtype,
compression=raw_data.compression,
compression_opts=raw_data.compression_opts,
fillvalue=raw_data.fillvalue)
for key, val in raw_data.attrs.items():
new_raw_data.attrs[key] = val
r = raw_data[:]
n = np.full(new_raw_data.shape, _get_np_fillvalue(raw_data), dtype=new_raw_data.dtype)
raw_data_chunks_map = {}
for new_chunk, chunk in zip(chunks.indices(new_shape), chunks_to_keep):
# Shrink new_chunk to the size of chunk, in case chunk isn't a full
# chunk in one of the dimensions.
# TODO: Implement something in ndindex to do this.
new_chunk = Tuple(
*[Slice(new_chunk.args[i].start,
new_chunk.args[i].start+len(chunk.args[i]))
for i in range(len(new_chunk.args))])
raw_data_chunks_map[chunk] = new_chunk
n[new_chunk.raw] = r[chunk.raw]
new_raw_data[:] = n
if not tmp:
del f['_version_data'][name]['raw_data']
f['_version_data'][name].move('_tmp_raw_data', 'raw_data')
return raw_data_chunks_map
def _recreate_hashtable(f, name, raw_data_chunks_map, tmp=False):
"""
Recreate the hashtable for the dataset f, with only the new chunks in the
raw_data_chunks_map.
If tmp=True, a new hashtable called '_tmp_hash_table' is created.
Otherwise the hashtable is replaced.
"""
# We could just reconstruct the hashtable with from_raw_data, but that is
# slow, so instead we recreate it manually from the old hashable and the
# raw_data_chunks_map.
old_hashtable = Hashtable(f, name)
new_hash_table = Hashtable(f, name, hash_table_name='_tmp_hash_table')
old_inverse = old_hashtable.inverse()
for old_chunk, new_chunk in raw_data_chunks_map.items():
if isinstance(old_chunk, Tuple):
old_chunk = old_chunk.args[0]
if isinstance(new_chunk, Tuple):
new_chunk = new_chunk.args[0]
new_hash_table[old_inverse[old_chunk.reduce()]] = new_chunk
new_hash_table.write()
if not tmp:
del f['_version_data'][name]['hash_table']
f['_version_data'][name].move('_tmp_hash_table', 'hash_table')
def _recreate_virtual_dataset(f, name, versions, raw_data_chunks_map, tmp=False):
"""
Recreate every virtual dataset `name` in the versions `versions` according
to the new raw_data chunks in `raw_data_chunks_map`.
Returns a dict mapping the chunks from the old raw dataset to the chunks
in the new raw dataset. Chunks not in the mapping were deleted. If the
dict is empty, then no remaining version contains the given dataset.
If tmp is True, the new virtual datasets are named `'_tmp_' + name` and
are placed alongside the existing ones. Otherwise the existing virtual
datasets are replaced.
"""
raw_data = f['_version_data'][name]['raw_data']
for version_name in versions:
if name not in f['_version_data/versions'][version_name]:
continue
group = f['_version_data/versions'][version_name]
dataset = group[name]
# See the comments in create_virtual_dataset
layout = VirtualLayout(dataset.shape, dtype=dataset.dtype)
layout_has_sources = hasattr(layout, 'sources')
if not layout_has_sources:
from h5py import _selector
layout._src_filenames.add(b'.')
space = h5s.create_simple(dataset.shape)
selector = _selector.Selector(space)
# If a dataset has no data except for the fillvalue, it will not be virtual
if dataset.is_virtual:
virtual_sources = dataset.virtual_sources()
for vmap in virtual_sources:
vspace, fname, dset_name, src_space = vmap
fname = fname.encode('utf-8')
assert fname == b'.', fname
vslice = spaceid_to_slice(vspace)
src_slice = spaceid_to_slice(src_space)
if src_slice not in raw_data_chunks_map:
raise ValueError(f"Could not find the chunk for {vslice} ({src_slice} in the old raw dataset) for {name!r} in {version_name!r}")
new_src_slice = raw_data_chunks_map[src_slice]
if not layout_has_sources:
key = new_src_slice.raw
vs_sel = select(raw_data.shape, key, dataset=None)
sel = selector.make_selection(vslice.raw)
layout.dcpl.set_virtual(
sel.id, b'.', raw_data.name.encode('utf-8'), vs_sel.id
)
else:
vs_sel = select(raw_data.shape, new_src_slice.raw, None)
layout_sel = select(dataset.shape, vslice.raw, None)
new_vmap = VDSmap(layout_sel.id, fname, dset_name, vs_sel.id)
layout.sources.append(new_vmap)
head, tail = pp.split(name)
tmp_name = '_tmp_' + tail
tmp_path = pp.join(head, tmp_name)
dtype = raw_data.dtype
fillvalue = dataset.fillvalue
if dtype.metadata and ('vlen' in dtype.metadata or 'h5py_encoding' in dtype.metadata):
# Variable length string dtype
# (https://h5py.readthedocs.io/en/2.10.0/strings.html). Setting the
# fillvalue in this case doesn't work
# (https://github.com/h5py/h5py/issues/941).
if fillvalue not in [0, '', b'', None]:
raise ValueError("Non-default fillvalue not supported for variable length strings")
fillvalue = None
tmp_dataset = group.create_virtual_dataset(tmp_path, layout, fillvalue=fillvalue)
for key, val in dataset.attrs.items():
tmp_dataset.attrs[key] = val
if not tmp:
del group[name]
group.move(tmp_path, name)
def _is_empty(f: VersionedHDF5File, name: str, version: str) -> bool:
"""Return True if the dataset at the given version is empty, False otherwise.
Assumes the dataset exists in the given verison.
Parameters
----------
f : VersionedHDF5File
File where the dataset resides
name : str
Name of the dataset
version : str
Version of the dataset to check
Returns
-------
bool
True if the dataset is empty, False otherwise
"""
return not f['_version_data/versions'][version][name].is_virtual
def _exists_in_version(f: VersionedHDF5File, name: str, version: str) -> bool:
"""Check if a dataset exists in a given version.
Parameters
----------
f : VersionedHDF5File
File where the dataset may reside
name : str
Name of the dataset
version : str
Version of the dataset to check
Returns
-------
bool
True if the dataset exists in the version, False otherwise
"""
return name in f['_version_data/versions'][version]
def _all_extant_are_empty(
f: VersionedHDF5File, name: str, versions: Iterable[str]
) -> bool:
"""Check if the given versions of a dataset are empty.
Doesn't assume the dataset exists in any version.
Parameters
----------
f : VersionedHDF5File
File where the dataset may reside
name : str
Name of the dataset
version : str
Version of the dataset to check
Returns
-------
bool
True if any version of the dataset that can be found is empty,
False if a version exists which is not.
"""
for version in versions:
if _exists_in_version(f, name, version):
if not _is_empty(f, name, version):
return False
return True
def _delete_dataset(f: VersionedHDF5File, name: str, versions_to_delete: Iterable[str]):
"""Delete the given dataset from the versions."""
version_data = f['_version_data']
versions = version_data['versions']
if name == 'versions':
return
versions_to_keep = set(versions) - set(versions_to_delete)
# If the dataset is empty in the versions to delete, we don't
# need to recreate the raw data, hash table, or virtual datasets.
if _all_extant_are_empty(f, name, versions_to_delete):
return
raw_data_chunks_map = _recreate_raw_data(f, name, versions_to_delete)
# If the dataset is not in any versions that are being kept, that
# data must be deleted.
if not any([name in versions[version] for version in versions_to_keep]):
del version_data[name]
return
# Recreate the hash table.
_recreate_hashtable(f, name, raw_data_chunks_map)
# Recreate every virtual dataset in every kept version.
_recreate_virtual_dataset(f, name, versions_to_keep, raw_data_chunks_map)
def _walk(g: HLObject, prefix: str = '') -> List[str]:
"""Traverse the object tree, returning all `raw_data` datasets.
We use this instead of version_data.visit(delete_dataset) because
visit() has trouble with the groups being deleted from under it.
Parameters
----------
g : HLObject
Object containing datasets as descendants
prefix : str
Prefix to apply to object names; can be used to filter particular descendants
Returns
-------
List[str]
List of the names of `raw_data` datasets in g
"""
datasets = []
for name in g:
obj = g[name]
if isinstance(obj, Group):
if 'raw_data' in obj:
datasets.append(prefix + name)
else:
datasets.extend(_walk(obj, prefix + name + '/'))
return datasets
def delete_versions(
f: Union[VersionedHDF5File, File],
versions_to_delete: Iterable[str]
):
"""Completely delete the given versions from a file
This function should be used instead of deleting the version group
directly, as this will not delete the underlying data that is unique to
the version.
"""
if isinstance(f, VersionedHDF5File):
f = f.f
version_data = f['_version_data']
if isinstance(versions_to_delete, str):
versions_to_delete = [versions_to_delete]
versions = version_data['versions']
if '__first_version__' in versions_to_delete:
raise ValueError("Cannot delete first version")
for version in versions_to_delete:
if version not in versions:
raise ValueError(f"Version {version!r} does not exist")
current_version = versions.attrs['current_version']
while current_version in versions_to_delete:
current_version = versions[current_version].attrs['prev_version']
for name in _walk(version_data):
_delete_dataset(f, name, versions_to_delete)
for version_name in versions_to_delete:
prev_version = versions[version_name].attrs['prev_version']
for _version in versions:
if _version == '__first_version__':
continue
v = versions[_version]
if v.attrs['prev_version'] == version_name:
v.attrs['prev_version'] = prev_version
del versions[version_name]
versions.attrs['current_version'] = current_version
# Backwards compatibility
delete_version = delete_versions
def modify_metadata(f, dataset_name, *, chunks=None, compression=None,
compression_opts=None, dtype=None, fillvalue=None):
"""
Modify metadata for a versioned dataset in-place.
The metadata is modified for all versions containing a dataset.
`f` should be the h5py file or versioned_hdf5 VersionedHDF5File object.
`dataset_name` is the name of the dataset in the version group(s).
Metadata that may be modified are
- `chunks`: must be compatible with the dataset shape
- `compression`: see `h5py.Group.create_dataset()`
- `compression_opts`: see `h5py.Group.create_dataset()`
- `dtype`: all data in the dataset is cast to the new dtype
- `fillvalue`: see the note below
If set to `None` (the default), the given metadata is not modified.
Note for `fillvalue`, all values equal to the old fillvalue are updated to
be the new fillvalue, regardless of whether they are explicitly stored or
represented sparsely in the underlying HDF5 dataset. Also note that
datasets without an explicitly set fillvalue have a default fillvalue
equal to the default value of the dtype (e.g., 0. for float dtypes).
"""
if isinstance(f, VersionedHDF5File):
f = f.f
def callback(dataset, version_name):
_chunks = chunks or dataset.chunks
_fillvalue = fillvalue or dataset.fillvalue
if isinstance(dataset, DatasetWrapper):
dataset = dataset.dataset
name = dataset.name[len(dataset.parent.name)+1:]
if isinstance(dataset, (InMemoryDataset, InMemoryArrayDataset)):
new_dataset = InMemoryArrayDataset(name, dataset[()], tmp_parent,
fillvalue=_fillvalue,
chunks=_chunks)
if _fillvalue:
new_dataset[new_dataset == dataset.fillvalue] = _fillvalue
elif isinstance(dataset, InMemorySparseDataset):
new_dataset = InMemorySparseDataset(name, shape=dataset.shape,
dtype=dataset.dtype,
parent=tmp_parent,
chunks=_chunks,
fillvalue=_fillvalue)
new_dataset.data_dict = deepcopy(dataset.data_dict)
if _fillvalue:
for a in new_dataset.data_dict.values():
a[a == dataset.fillvalue] = _fillvalue
else:
raise NotImplementedError(type(dataset))
if compression:
new_dataset.compression = compression
if compression_opts:
new_dataset.compression_opts = compression_opts
if dtype:
return new_dataset.as_dtype(name, dtype, tmp_parent)
return new_dataset
newf = tmp_group(f)
tmp_parent = InMemoryGroup(newf.create_group('__tmp_parent__').id)
try:
recreate_dataset(f, dataset_name, newf, callback=callback)
swap(f, newf)
finally:
del newf[newf.name]
def swap(old, new):
    """
    Swap every dataset in old with the corresponding one in new

    Datasets in old that aren't in new are ignored.
    """
    # Collect first, then move: mutating while visititems() walks the tree
    # would invalidate the iteration.
    move_names = []
    def _move(name, object):
        # NOTE: `object` shadows the builtin; kept as-is (rename would be a
        # code change).
        if isinstance(object, Dataset):
            if name in new:
                move_names.append(name)
    old.visititems(_move)
    for name in move_names:
        if new[name].is_virtual:
            # We cannot simply move virtual datasets, because they will still
            # point to the old raw_data location. So instead, we have to
            # recreate them, pointing to the new raw_data.
            oldd = old[name]
            newd = new[name]
            def _normalize(path):
                # Ensure a trailing slash so prefix replacement is exact.
                return path if path.endswith('/') else path + '/'
            def _replace_prefix(path, name1, name2):
                """Replace the prefix name1 with name2 in path"""
                name1 = _normalize(name1)
                name2 = _normalize(name2)
                return name2 + path[len(name1):]
            def _new_vds_layout(d, name1, name2):
                """Recreate a VirtualLayout for d, replacing name1 with name2 in the source dset name"""
                virtual_sources = d.virtual_sources()
                layout = VirtualLayout(d.shape, dtype=d.dtype)
                for vmap in virtual_sources:
                    vspace, fname, dset_name, src_space = vmap
                    assert dset_name.startswith(name1)
                    dset_name = _replace_prefix(dset_name, name1, name2)
                    fname = fname.encode('utf-8')
                    new_vmap = VDSmap(vspace, fname, dset_name, src_space)
                    # h5py 3.3 changed the VirtualLayout code. See
                    # https://github.com/h5py/h5py/pull/1905.
                    if hasattr(layout, 'sources'):
                        layout.sources.append(new_vmap)
                    else:
                        layout.dcpl.set_virtual(vspace, fname,
                                                dset_name.encode('utf-8'), src_space)
                return layout
            # Each side gets a layout whose source names point at the *other*
            # side, which is what performs the swap.
            old_layout = _new_vds_layout(oldd, old.name, new.name)
            new_layout = _new_vds_layout(newd, new.name, old.name)
            old_fillvalue = old[name].fillvalue
            new_fillvalue = new[name].fillvalue
            old_attrs = dict(old[name].attrs)
            new_attrs = dict(new[name].attrs)
            del old[name]
            old.create_virtual_dataset(name, new_layout, fillvalue=new_fillvalue)
            for k, v in new_attrs.items():
                # String attrs that embed the group path must be re-prefixed.
                if isinstance(v, str) and v.startswith(new.name):
                    v = _replace_prefix(v, new.name, old.name)
                old[name].attrs[k] = v
            del new[name]
            new.create_virtual_dataset(name, old_layout, fillvalue=old_fillvalue)
            for k, v in old_attrs.items():
                if isinstance(v, str) and v.startswith(old.name):
                    v = _replace_prefix(v, old.name, new.name)
                new[name].attrs[k] = v
        else:
            # Invalidate any InMemoryGroups that point to these groups
            delete = []
            for bind in _groups:
                if get_name(bind) and (get_name(bind).startswith(get_name(old.id)) or get_name(bind).startswith(get_name(new.id))):
                    delete.append(bind)
            for d in delete:
                del _groups[d]
            # Three-step move via a '__tmp' name so the two datasets trade
            # places without a name collision.
            old.move(name, pp.join(new.name, name + '__tmp'))
            new.move(name, pp.join(old.name, name))
            new.move(name + '__tmp', name)
| {"/versioned_hdf5/replay.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/slicetools.py", "/versioned_hdf5/hashtable.py"], "/analysis/generate_data.py": ["/utils/__init__.py", "/versioned_hdf5/api.py"], "/versioned_hdf5/__init__.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/replay.py"], "/benchmarks/resize.py": ["/versioned_hdf5/__init__.py"], "/versioned_hdf5/tests/test_replay.py": ["/versioned_hdf5/replay.py", "/versioned_hdf5/hashtable.py"], "/versioned_hdf5/api.py": ["/versioned_hdf5/hashtable.py"], "/benchmarks/many_chunks.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/versionedhdf5file.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/delete_versions.py": ["/versioned_hdf5/__init__.py", "/versioned_hdf5/replay.py"], "/versioned_hdf5/tests/test_hashtable.py": ["/versioned_hdf5/hashtable.py", "/versioned_hdf5/tests/helpers.py", "/versioned_hdf5/__init__.py"], "/analysis/performance_tests.py": ["/versioned_hdf5/__init__.py"]} |
62,220 | ArvidJB/versioned-hdf5 | refs/heads/master | /analysis/generate_data.py | from __future__ import (absolute_import, division, print_function, with_statement)
import datetime
import logging
import random
import time
import sys
import h5py
import numpy as np
sys.path.append("..")
from generate_data_base import TestDatasetPerformanceBase
from utils import temp_dir_ctx
from versioned_hdf5.api import VersionedHDF5File
class TestVersionedDatasetPerformance(TestDatasetPerformanceBase):
    """Concrete benchmark writer for the TestDatasetPerformanceBase scenarios.

    Each ``_write_transactions_*`` method writes an initial dataset plus
    ``num_transactions`` follow-up transactions to ``<name>.h5``, either
    through a :class:`VersionedHDF5File` (``versions=True``) or through plain
    resizable h5py datasets (``versions=False``), and returns the list of
    per-transaction wall-clock times (final entry: total elapsed time).
    """

    @classmethod
    def _write_transactions_sparse(cls, name, chunk_size, compression, versions,
                                   print_transactions,
                                   num_rows_initial, num_transactions,
                                   num_rows_per_append,
                                   pct_changes, num_changes,
                                   pct_deletes, num_deletes,
                                   pct_inserts, num_inserts):
        """Write sparse-layout (equal-length key/value arrays) transactions.

        Returns a list of elapsed seconds, one entry per transaction plus a
        final entry holding the total elapsed time.
        """
        logger = logging.getLogger(__name__)
        filename = f"{name}.h5"
        tts = []  # transaction timestamps, used for version names and logging
        f = h5py.File(filename, 'w')
        told = time.time()
        t0 = told
        times = []
        try:
            if versions:
                file = VersionedHDF5File(f)
                with file.stage_version("initial_version") as group:
                    key0_ds = group.create_dataset(name + '/key0',
                                                   data=np.random.rand(num_rows_initial),
                                                   dtype=np.dtype('int64'),
                                                   chunks=chunk_size,
                                                   compression=compression)
                    key1_ds = group.create_dataset(name + '/key1',
                                                   data=np.random.rand(num_rows_initial),
                                                   dtype=np.dtype('int64'),
                                                   chunks=chunk_size,
                                                   compression=compression)
                    val_ds = group.create_dataset(name + '/val',
                                                  data=np.random.rand(num_rows_initial),
                                                  dtype=np.dtype('float64'),
                                                  chunks=chunk_size,
                                                  compression=compression)
            else:
                # Plain h5py needs maxshape so the datasets can be resized later.
                key0_ds = f.create_dataset(name + '/key0',
                                           data=np.random.rand(num_rows_initial),
                                           dtype=np.dtype('int64'),
                                           maxshape=(None,),
                                           chunks=(chunk_size,),
                                           compression=compression)
                key1_ds = f.create_dataset(name + '/key1',
                                           data=np.random.rand(num_rows_initial),
                                           dtype=np.dtype('int64'),
                                           maxshape=(None,),
                                           chunks=(chunk_size,),
                                           compression=compression)
                val_ds = f.create_dataset(name + '/val',
                                          data=np.random.rand(num_rows_initial),
                                          dtype=np.dtype('float64'),
                                          maxshape=(None,),
                                          chunks=(chunk_size,),
                                          compression=compression)
            for a in range(num_transactions):
                if print_transactions:
                    print("Transaction", a)
                tt = datetime.datetime.utcnow()
                # The first transaction only appends: the pct_* knobs are
                # forced to 0 so there is something to modify later.
                if versions:
                    with file.stage_version(str(tt)) as group:
                        key0_ds = group[name + '/key0']
                        key1_ds = group[name + '/key1']
                        val_ds = group[name + '/val']
                        cls._modify_dss_sparse(key0_ds, key1_ds, val_ds,
                                               num_rows_per_append,
                                               pct_changes if a > 0 else 0.0, num_changes,
                                               pct_deletes if a > 0 else 0.0, num_deletes,
                                               pct_inserts if a > 0 else 0.0, num_inserts)
                else:
                    cls._modify_dss_sparse(key0_ds, key1_ds, val_ds,
                                           num_rows_per_append,
                                           pct_changes if a > 0 else 0.0, num_changes,
                                           pct_deletes if a > 0 else 0.0, num_deletes,
                                           pct_inserts if a > 0 else 0.0, num_inserts)
                t = time.time()
                times.append(t - told)
                told = t
                tts.append(tt)
                logger.info('Wrote transaction %d at transaction time %s', a, tt)
                f.flush()
            # Total elapsed time.  Use `told` rather than `t`: `t` is
            # undefined when num_transactions == 0 (bug in the original);
            # the value is identical otherwise.
            times.append(told - t0)
        finally:
            f.close()
        return times

    @classmethod
    def _write_transactions_dense(cls, name,
                                  chunk_size,
                                  compression,
                                  versions,
                                  print_transactions,
                                  num_rows_initial_0, num_rows_initial_1,
                                  num_transactions,
                                  num_rows_per_append_0,
                                  pct_changes, num_changes,
                                  pct_deletes, num_deletes_0, num_deletes_1,
                                  pct_inserts, num_inserts_0, num_inserts_1):
        """Write dense-layout transactions (2-d value array keyed by two axes).

        Returns a list of elapsed seconds, one entry per transaction plus a
        final entry holding the total elapsed time.
        """
        logger = logging.getLogger(__name__)
        filename = f'{name}.h5'
        tts = []
        f = h5py.File(filename, 'w')
        told = time.time()
        t0 = told
        times = []
        try:
            if versions:
                file = VersionedHDF5File(f)
                with file.stage_version("initial_version") as group:
                    key0_ds = group.create_dataset(name + '/key0',
                                                   data=np.random.rand(num_rows_initial_0),
                                                   dtype=np.dtype('int64'),
                                                   chunks=chunk_size,
                                                   compression=compression)
                    key1_ds = group.create_dataset(name + '/key1',
                                                   data=np.random.rand(num_rows_initial_1),
                                                   dtype=np.dtype('int64'),
                                                   chunks=chunk_size,
                                                   compression=compression)
                    # two dimensional value array
                    val_ds = group.create_dataset(name + '/val',
                                                  data=np.random.rand(num_rows_initial_0, num_rows_initial_1),
                                                  dtype=np.dtype('float64'),
                                                  chunks=(chunk_size, chunk_size),
                                                  compression=compression)
            else:
                key0_ds = f.create_dataset(name + '/key0',
                                           data=np.random.rand(num_rows_initial_0),
                                           dtype=np.dtype('int64'),
                                           maxshape=(None,),
                                           chunks=(chunk_size,),
                                           compression=compression)
                # BUGFIX: key1 must have num_rows_initial_1 rows (as in the
                # versioned branch above).  The original used
                # num_rows_initial_0 here, which breaks the
                # val.shape == (len(key0), len(key1)) invariant asserted by
                # _modify_dss_dense whenever the two sizes differ.
                key1_ds = f.create_dataset(name + '/key1',
                                           data=np.random.rand(num_rows_initial_1),
                                           dtype=np.dtype('int64'),
                                           maxshape=(None,),
                                           chunks=(chunk_size,),
                                           compression=compression)
                val_ds = f.create_dataset(name + '/val',
                                          data=np.random.rand(num_rows_initial_0, num_rows_initial_1),
                                          dtype=np.dtype('float64'),
                                          maxshape=(None, None),
                                          chunks=(chunk_size, chunk_size),
                                          compression=compression)
            for a in range(num_transactions):
                if print_transactions:
                    print(f"Transaction {a} of {num_transactions}")
                tt = datetime.datetime.utcnow()
                if versions:
                    with file.stage_version(str(tt)) as group:
                        key0_ds = group[name + '/key0']
                        key1_ds = group[name + '/key1']
                        val_ds = group[name + '/val']
                        cls._modify_dss_dense(key0_ds, key1_ds, val_ds,
                                              num_rows_per_append_0,
                                              pct_changes if a > 0 else 0.0, num_changes,
                                              pct_deletes if a > 0 else 0.0, num_deletes_0, num_deletes_1,
                                              pct_inserts if a > 0 else 0.0, num_inserts_0, num_inserts_1)
                else:
                    cls._modify_dss_dense(key0_ds, key1_ds, val_ds,
                                          num_rows_per_append_0,
                                          pct_changes if a > 0 else 0.0, num_changes,
                                          pct_deletes if a > 0 else 0.0, num_deletes_0, num_deletes_1,
                                          pct_inserts if a > 0 else 0.0, num_inserts_0, num_inserts_1)
                t = time.time()
                times.append(t - told)
                told = t
                tts.append(tt)
                logger.info('Wrote transaction %d at transaction time %s', a, tt)
                f.flush()
            times.append(told - t0)  # total; safe when num_transactions == 0
        finally:
            f.close()
        return times

    @classmethod
    def _write_transactions_dense_old(cls, name, num_rows_initial_0, num_rows_initial_1,
                                      num_transactions,
                                      num_rows_per_append_0,
                                      pct_changes, num_changes,
                                      pct_deletes, num_deletes_0, num_deletes_1,
                                      pct_inserts, num_inserts_0, num_inserts_1):
        """Legacy dense writer: always versioned, 1-d flattened value array.

        Writes into a temporary directory; now returns per-transaction times
        (like the newer writers) so callers can report them — the original
        returned None even though its caller expected a list of times.
        """
        logger = logging.getLogger(__name__)
        with temp_dir_ctx() as tmp_dir:
            filename = tmp_dir + f'/{name}.h5'
            tts = []
            f = h5py.File(filename, 'w')
            file = VersionedHDF5File(f)
            told = time.time()
            t0 = told
            times = []
            try:
                with file.stage_version("initial_version") as group:
                    key0_ds = group.create_dataset(name + '/key0', data=np.random.rand(num_rows_initial_0),
                                                   dtype=np.dtype('int64'))
                    key1_ds = group.create_dataset(name + '/key1', data=np.random.rand(num_rows_initial_1),
                                                   dtype=np.dtype('int64'))
                    # Flattened value array: len(val) == len(key0) * len(key1).
                    val_ds = group.create_dataset(name + '/val',
                                                  data=np.random.rand(num_rows_initial_0 * num_rows_initial_1),
                                                  dtype=np.dtype('float64'))
                for a in range(num_transactions):
                    tt = datetime.datetime.utcnow()
                    with file.stage_version(str(tt)) as group:
                        key0_ds = group[name + '/key0']
                        key1_ds = group[name + '/key1']
                        val_ds = group[name + '/val']
                        cls._modify_dss_dense_old(key0_ds, key1_ds, val_ds,
                                                  num_rows_per_append_0,
                                                  pct_changes if a > 0 else 0.0, num_changes,
                                                  pct_deletes if a > 0 else 0.0, num_deletes_0, num_deletes_1,
                                                  pct_inserts if a > 0 else 0.0, num_inserts_0, num_inserts_1)
                    t = time.time()
                    times.append(t - told)
                    told = t
                    tts.append(tt)
                    logger.info('Wrote transaction %d at transaction time %s', a, tt)
                times.append(told - t0)
            finally:
                f.close()
            return times

    @classmethod
    def _modify_dss_dense_old(cls, key0_ds, key1_ds, val_ds,
                              num_rows_per_append_0,
                              pct_changes, num_changes,
                              pct_deletes, num_deletes_0, num_deletes_1,
                              pct_inserts, num_inserts_0, num_inserts_1):
        """Apply one transaction of random changes/deletes/inserts/appends to
        the legacy flattened-dense datasets (val is 1-d, row-major over
        key0 x key1)."""
        n_key0 = len(key0_ds)
        n_key1 = len(key1_ds)
        n_val = len(val_ds)
        assert n_val == n_key0 * n_key1
        # change values
        if random.randrange(0, 100) <= pct_changes:
            r_num_chgs = int(np.random.randn() + num_changes)
            for b in range(r_num_chgs):
                r = random.randrange(0, n_val)
                val_ds[r] = np.random.rand()
        # delete rows
        # NOTE: the resize() calls below dropped the original's
        # `refcheck=False` argument — the h5py-style resize(size, axis=None)
        # API used by these datasets takes no such keyword, and none of the
        # sibling _modify_dss_* methods pass it.
        if random.randrange(0, 100) <= pct_deletes:
            # delete from values in two steps
            # 1. delete from key0 and associated vals
            r_num_dels_0 = max(int(np.random.randn() + num_deletes_0), 1)
            rs_0 = [random.randrange(0, n_key0) for _ in range(r_num_dels_0)]
            rs_val = [r0 * n_key1 + r1 for r0 in rs_0 for r1 in range(n_key1)]
            n_val -= len(rs_val)
            arr_val = val_ds[:]
            arr_val = np.delete(arr_val, rs_val)
            n_key0 -= r_num_dels_0
            arr_key0 = key0_ds[:]
            arr_key0 = np.delete(arr_key0, rs_0)
            key0_ds.resize((n_key0,))
            key0_ds[:] = arr_key0
            # 2. delete from key1 and associated vals
            r_num_dels_1 = max(int(np.random.randn() + num_deletes_1), 1)
            rs_1 = [random.randrange(0, n_key1) for _ in range(r_num_dels_1)]
            rs_val = [r0 * n_key1 + r1 for r0 in range(n_key0) for r1 in rs_1]
            n_val -= len(rs_val)
            arr_val = np.delete(arr_val, rs_val)
            val_ds.resize((n_val,))
            val_ds[:] = arr_val
            n_key1 -= r_num_dels_1
            arr_key1 = key1_ds[:]
            arr_key1 = np.delete(arr_key1, rs_1)
            key1_ds.resize((n_key1,))
            key1_ds[:] = arr_key1
        # insert rows
        if random.randrange(0, 100) <= pct_inserts:
            # insert into values in two steps
            arr_val = val_ds[:]
            # 1. insert into key0 and associated vals
            rand_num_inss_0 = max(int(np.random.randn() + num_inserts_0), 1)
            rs_0 = [random.randrange(0, n_key0) for _ in range(rand_num_inss_0)]
            rs_val = [r0 * n_key1 + r1 for r0 in rs_0 for r1 in range(n_key1)]
            n_val += len(rs_val)
            arr_val = np.insert(arr_val, rs_val, [np.random.rand() for _ in rs_val])
            arr_key0 = key0_ds[:]
            arr_key0 = np.insert(arr_key0, rs_0, np.random.randint(0, int(1e6), size=len(rs_0)))
            n_key0 += rand_num_inss_0
            key0_ds.resize((n_key0,))
            key0_ds[:] = arr_key0
            # 2. insert into key1 and associated vals
            rand_num_inss_1 = max(int(np.random.randn() + num_inserts_1), 1)
            rs_1 = [random.randrange(0, n_key1) for _ in range(rand_num_inss_1)]
            rs_val = [r0 * n_key1 + r1 for r0 in range(n_key0) for r1 in rs_1]
            n_val += len(rs_val)
            arr_val = np.insert(arr_val, rs_val, np.random.rand(len(rs_val)))
            val_ds.resize((n_val,))
            val_ds[:] = arr_val
            arr_key1 = key1_ds[:]
            arr_key1 = np.insert(arr_key1, rs_1, np.random.randint(0, int(1e6), size=len(rs_1)))
            n_key1 += rand_num_inss_1
            key1_ds.resize((n_key1,))
            key1_ds[:] = arr_key1
        # append
        rand_num_apps_0 = int(np.random.randn() + num_rows_per_append_0)
        if rand_num_apps_0 > 0:
            # append to key0 and associated vals
            n_key0 += rand_num_apps_0
            key0_ds.resize((n_key0,))
            key0_ds[-rand_num_apps_0:] = np.random.randint(0, int(1e6), size=rand_num_apps_0)
            num_val_apps = n_key1 * rand_num_apps_0
            n_val += num_val_apps
            val_ds.resize((n_val,))
            val_ds[-num_val_apps:] = np.random.rand(num_val_apps)

    def test_mostly_appends_dense_old(self,
                                      num_transactions=250,
                                      filename="test_mostly_appends_dense_old",
                                      chunk_size=None,
                                      compression=None,
                                      versions=True,
                                      print_transactions=False,
                                      deterministic=False):
        """Benchmark the legacy dense writer.

        ``chunk_size``, ``compression``, ``versions`` and
        ``print_transactions`` are accepted for signature parity with the
        sibling tests but are ignored: the legacy writer is always versioned
        and uses default chunking.
        """
        num_rows_initial_0 = 30
        num_rows_initial_1 = 30
        num_rows_per_append_0 = 1
        if deterministic:
            pct_inserts = 0
            pct_deletes = 0
            pct_changes = 0
        else:
            pct_inserts = 5
            pct_deletes = 1
            pct_changes = 5
        num_inserts_0 = 1
        num_inserts_1 = 10
        num_deletes_0 = 1
        num_deletes_1 = 1
        num_changes = 10
        # BUGFIX: the original also passed chunk_size/compression/versions/
        # print_transactions here, i.e. 17 positional arguments to a
        # 14-parameter method — an unconditional TypeError.
        times = self._write_transactions_dense_old(filename,
                                                   num_rows_initial_0, num_rows_initial_1,
                                                   num_transactions,
                                                   num_rows_per_append_0,
                                                   pct_changes, num_changes,
                                                   pct_deletes, num_deletes_0, num_deletes_1,
                                                   pct_inserts, num_inserts_0, num_inserts_1)
        return times
if __name__ == '__main__':
    # Benchmark driver: run the sparse large-fraction-changes scenario for
    # each requested transaction count, echoing progress per transaction.
    num_transactions = [500]
    for n_txn in num_transactions:
        runner = TestVersionedDatasetPerformance()
        times = runner.test_large_fraction_changes_sparse(n_txn, print_transactions=True)
| {"/versioned_hdf5/replay.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/slicetools.py", "/versioned_hdf5/hashtable.py"], "/analysis/generate_data.py": ["/utils/__init__.py", "/versioned_hdf5/api.py"], "/versioned_hdf5/__init__.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/replay.py"], "/benchmarks/resize.py": ["/versioned_hdf5/__init__.py"], "/versioned_hdf5/tests/test_replay.py": ["/versioned_hdf5/replay.py", "/versioned_hdf5/hashtable.py"], "/versioned_hdf5/api.py": ["/versioned_hdf5/hashtable.py"], "/benchmarks/many_chunks.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/versionedhdf5file.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/delete_versions.py": ["/versioned_hdf5/__init__.py", "/versioned_hdf5/replay.py"], "/versioned_hdf5/tests/test_hashtable.py": ["/versioned_hdf5/hashtable.py", "/versioned_hdf5/tests/helpers.py", "/versioned_hdf5/__init__.py"], "/analysis/performance_tests.py": ["/versioned_hdf5/__init__.py"]} |
62,221 | ArvidJB/versioned-hdf5 | refs/heads/master | /versioned_hdf5/__init__.py | from .api import VersionedHDF5File
from .replay import delete_version, delete_versions, modify_metadata
__all__ = ['VersionedHDF5File', 'delete_version', 'delete_versions', 'modify_metadata']
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| {"/versioned_hdf5/replay.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/slicetools.py", "/versioned_hdf5/hashtable.py"], "/analysis/generate_data.py": ["/utils/__init__.py", "/versioned_hdf5/api.py"], "/versioned_hdf5/__init__.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/replay.py"], "/benchmarks/resize.py": ["/versioned_hdf5/__init__.py"], "/versioned_hdf5/tests/test_replay.py": ["/versioned_hdf5/replay.py", "/versioned_hdf5/hashtable.py"], "/versioned_hdf5/api.py": ["/versioned_hdf5/hashtable.py"], "/benchmarks/many_chunks.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/versionedhdf5file.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/delete_versions.py": ["/versioned_hdf5/__init__.py", "/versioned_hdf5/replay.py"], "/versioned_hdf5/tests/test_hashtable.py": ["/versioned_hdf5/hashtable.py", "/versioned_hdf5/tests/helpers.py", "/versioned_hdf5/__init__.py"], "/analysis/performance_tests.py": ["/versioned_hdf5/__init__.py"]} |
62,222 | ArvidJB/versioned-hdf5 | refs/heads/master | /analysis/generate_data_base.py | from __future__ import (absolute_import, division, print_function, with_statement)
import abc
import random
from unittest import TestCase
import numpy as np
import scipy.stats
class TestDatasetPerformanceBase(TestCase, metaclass=abc.ABCMeta):
    """
    Test cases for the most common use cases we encounter when writing data
    to HDF5.

    In general all data has multiple columns which are divided into "keys" and "values". The keys
    determine the identity of the row (this stock, this time) and the values are the associated
    values (the price, ...).

    We have two different implementation methods:
    - "sparse": key and value columns are stored as arrays of equal length and to get the i-th
      "row" you read key0[i], key1[i], ..., val0[i], val1[i], ...
    - "dense": key columns are the labels of the axes of the data and the length of the value
      column is the product of the length of the key columns:
          len(val0) == len(key0) * len(key1) * ...
      To get the i-th row you retrieve
          key0[i // len(key1) // len(key2) // ...],
          key1[(i // len(key2) // len(key3) // ...) % len(key1)],
          key2[(i // len(key3) // len(key4) // ...) % len(key2)],
          ...,
          val0[i], val1[i], ...
      TODO: check the math!

    Subclasses implement the abstract ``_write_transactions_*`` writers.
    """
    # models
    # Shape parameter of the power-law used to bias edit positions toward
    # high indices, i.e. toward recently appended rows.  (Name typo kept:
    # it is part of the class's public surface.)
    RECENCTNESS_POWERLAW_SHAPE = 20.0

    def test_mostly_appends_sparse(self,
                                   num_transactions=250,
                                   filename="test_mostly_appends_sparse",
                                   chunk_size=None,
                                   compression=None,
                                   versions=True,
                                   print_transactions=False,
                                   deterministic=False):
        """Sparse scenario dominated by appends; returns per-transaction times."""
        num_rows_initial = 1000
        num_rows_per_append = 1000
        # `deterministic` disables the random change/delete/insert edits so
        # only the appends remain.
        if deterministic:
            pct_inserts = 0
            pct_deletes = 0
            pct_changes = 0
        else:
            pct_inserts = 5
            pct_deletes = 1
            pct_changes = 5
        num_inserts = 10
        num_deletes = 10
        num_changes = 10
        times = self._write_transactions_sparse(filename, chunk_size, compression, versions,
                                                print_transactions, num_rows_initial,
                                                num_transactions, num_rows_per_append,
                                                pct_changes, num_changes,
                                                pct_deletes, num_deletes,
                                                pct_inserts, num_inserts)
        return times

    @classmethod
    @abc.abstractmethod
    def _write_transactions_sparse(cls, name, chunk_size, compression, versions,
                                   print_transactions,
                                   num_rows_initial, num_transactions,
                                   num_rows_per_append,
                                   pct_changes, num_changes,
                                   pct_deletes, num_deletes,
                                   pct_inserts, num_inserts):
        """Write the sparse-layout transactions; implemented by subclasses."""
        pass

    @classmethod
    def _get_rand_fn(cls, dtype):
        """Return a random-generator callable matching `dtype` (int64/float64)."""
        if dtype == np.dtype('int64'):
            return lambda size=None: np.random.randint(0, int(1e6), size=size)
        elif dtype == np.dtype('float64'):
            return np.random.rand
        else:
            raise ValueError('implement other dtypes')

    @classmethod
    def _modify_dss_sparse(cls, key0_ds, key1_ds, val_ds, num_rows_per_append,
                           pct_changes, num_changes,
                           pct_deletes, num_deletes,
                           pct_inserts, num_inserts):
        """Apply one transaction of random changes/deletes/inserts/appends to
        the three equal-length sparse datasets, in place."""
        # All three datasets must have the same length.
        ns = set([len(ds) for ds in [key0_ds, key1_ds, val_ds]])
        assert len(ns) == 1
        n = next(iter(ns))
        # change values
        if random.randrange(0, 100) <= pct_changes:
            r_num_chgs = int(np.random.randn() + num_changes)
            rand_fn = cls._get_rand_fn(val_ds.dtype)
            for b in range(r_num_chgs):
                r = random.randrange(0, n)
                val_ds[r] = rand_fn()
        # delete rows
        if random.randrange(0, 100) <= pct_deletes:
            r_num_dels = max(int(np.random.randn() + num_deletes), 1)
            # Power-law draws in (0, 1) concentrated near 1 -> delete
            # positions biased toward the most recent rows.
            pdf = scipy.stats.powerlaw.rvs(TestDatasetPerformanceBase.RECENCTNESS_POWERLAW_SHAPE, size=r_num_dels)
            rs = np.unique((pdf * n).astype('int64'))
            minr = min(rs)
            n -= len(rs)
            # Only rewrite the tail from the first deleted index onward.
            for ds in [key0_ds, key1_ds, val_ds]:
                arr = ds[minr:]
                arr = np.delete(arr, rs - minr)
                ds.resize((n,))
                ds[minr:] = arr
        # insert rows
        if random.randrange(0, 100) <= pct_inserts:
            rand_num_inss = max(int(np.random.randn() + num_inserts), 1)
            pdf = scipy.stats.powerlaw.rvs(TestDatasetPerformanceBase.RECENCTNESS_POWERLAW_SHAPE, size=rand_num_inss)
            rs = np.unique((pdf * n).astype('int64'))
            minr = min(rs)
            n += len(rs)
            for ds in [key0_ds, key1_ds, val_ds]:
                rand_fn = cls._get_rand_fn(ds.dtype)
                arr = ds[minr:]
                arr = np.insert(arr, rs - minr, [rand_fn() for _ in rs])
                ds.resize((n,))
                ds[minr:] = arr
        # append
        rand_num_apps = int(10 * np.random.randn() + num_rows_per_append)
        if rand_num_apps > 0:
            n += rand_num_apps
            for ds in [key0_ds, key1_ds, val_ds]:
                rand_fn = cls._get_rand_fn(ds.dtype)
                ds.resize((n,))
                ds[-rand_num_apps:] = rand_fn(rand_num_apps)

    def test_large_fraction_changes_sparse(self,
                                           num_transactions=250,
                                           filename="test_large_fraction_changes_sparse",
                                           chunk_size=None,
                                           compression=None,
                                           versions=True,
                                           print_transactions=False,
                                           deterministic=False):
        """Sparse scenario where most transactions change many values."""
        num_rows_initial = 5000
        num_rows_per_append = 10
        if deterministic:
            pct_inserts = 0
            pct_deletes = 0
            pct_changes = 0
        else:
            pct_inserts = 1
            pct_deletes = 1
            pct_changes = 90
        num_inserts = 10
        num_deletes = 10
        num_changes = 1000
        times = self._write_transactions_sparse(filename,
                                                chunk_size,
                                                compression,
                                                versions,
                                                print_transactions,
                                                num_rows_initial,
                                                num_transactions,
                                                num_rows_per_append,
                                                pct_changes, num_changes,
                                                pct_deletes, num_deletes,
                                                pct_inserts, num_inserts)
        return times

    def test_small_fraction_changes_sparse(self,
                                           num_transactions=250,
                                           filename="test_small_fraction_changes_sparse",
                                           chunk_size=None,
                                           compression=None,
                                           versions=True,
                                           print_transactions=False,
                                           deterministic=False):
        """Sparse scenario: frequent transactions but few changed values each."""
        num_rows_initial = 5000
        num_rows_per_append = 10
        if deterministic:
            pct_inserts = 0
            pct_deletes = 0
            pct_changes = 0
        else:
            pct_inserts = 1
            pct_deletes = 1
            pct_changes = 90
        num_inserts = 10
        num_deletes = 10
        num_changes = 10
        times = self._write_transactions_sparse(filename,
                                                chunk_size,
                                                compression,
                                                versions,
                                                print_transactions,
                                                num_rows_initial,
                                                num_transactions,
                                                num_rows_per_append,
                                                pct_changes, num_changes,
                                                pct_deletes, num_deletes,
                                                pct_inserts, num_inserts)
        return times

    def test_large_fraction_constant_sparse(self,
                                            num_transactions=250,
                                            filename="test_large_fraction_constant_sparse",
                                            chunk_size=None,
                                            compression=None,
                                            versions=True,
                                            print_transactions=False,
                                            deterministic=False):
        """Sparse scenario with no edits at all: dataset size stays constant."""
        num_rows_initial = 5000
        num_rows_per_append = 0  # triggers the constant size test (FIXME)
        pct_inserts = 0
        pct_deletes = 0
        pct_changes = 0
        num_inserts = 10
        num_deletes = 10
        num_changes = 1000
        times = self._write_transactions_sparse(filename,
                                                chunk_size,
                                                compression,
                                                versions,
                                                print_transactions,
                                                num_rows_initial,
                                                num_transactions,
                                                num_rows_per_append,
                                                pct_changes, num_changes,
                                                pct_deletes, num_deletes,
                                                pct_inserts, num_inserts)
        return times

    def test_mostly_appends_dense(self,
                                  num_transactions=250,
                                  filename="test_mostly_appends_dense",
                                  chunk_size=None,
                                  compression=None,
                                  versions=True,
                                  print_transactions=False,
                                  deterministic=False):
        """Dense (2-d value array) scenario dominated by appends on axis 0."""
        num_rows_initial_0 = 30
        num_rows_initial_1 = 30
        num_rows_per_append_0 = 1
        if deterministic:
            pct_inserts = 0
            pct_deletes = 0
            pct_changes = 0
        else:
            pct_inserts = 5
            pct_deletes = 1
            pct_changes = 5
        num_inserts_0 = 1
        num_inserts_1 = 10
        num_deletes_0 = 1
        num_deletes_1 = 1
        num_changes = 10
        times = self._write_transactions_dense(filename,
                                               chunk_size,
                                               compression,
                                               versions,
                                               print_transactions,
                                               num_rows_initial_0, num_rows_initial_1,
                                               num_transactions,
                                               num_rows_per_append_0,
                                               pct_changes, num_changes,
                                               pct_deletes, num_deletes_0, num_deletes_1,
                                               pct_inserts, num_inserts_0, num_inserts_1)
        return times

    @classmethod
    @abc.abstractmethod
    def _write_transactions_dense(cls, name,
                                  chunk_size,
                                  compression,
                                  versions,
                                  print_transactions,
                                  num_rows_initial_0, num_rows_initial_1,
                                  num_transactions,
                                  num_rows_per_append_0,
                                  pct_changes, num_changes,
                                  pct_deletes, num_deletes_0, num_deletes_1,
                                  pct_inserts, num_inserts_0, num_inserts_1):
        """Write the dense-layout transactions; implemented by subclasses."""
        pass

    @classmethod
    def _modify_dss_dense(cls, key0_ds, key1_ds, val_ds,
                          num_rows_per_append_0,
                          pct_changes, num_changes,
                          pct_deletes, num_deletes_0, num_deletes_1,
                          pct_inserts, num_inserts_0, num_inserts_1):
        """Apply one transaction of random edits to the dense datasets.

        `val_ds` is 2-d with shape (len(key0_ds), len(key1_ds)); row/column
        edits on the keys are mirrored on the matching axis of the values.
        """
        n_key0 = len(key0_ds)
        n_key1 = len(key1_ds)
        val_shape = val_ds.shape
        assert val_shape == (n_key0, n_key1)
        # change values
        if random.randrange(0, 100) <= pct_changes:
            r_num_chgs = int(np.random.randn() + num_changes)
            for b in range(r_num_chgs):
                r = (random.randrange(0, n_key0), random.randrange(0, n_key1))
                val_ds[r] = np.random.rand()
        # delete rows
        if random.randrange(0, 100) <= pct_deletes:
            # delete from values in two steps
            # 1. delete from key0 and associated vals
            r_num_dels_0 = max(int(np.random.randn() + num_deletes_0), 1)
            # Power-law bias: deletions target mostly recent rows.
            pdf = scipy.stats.powerlaw.rvs(TestDatasetPerformanceBase.RECENCTNESS_POWERLAW_SHAPE, size=r_num_dels_0)
            rs_0 = np.unique((pdf * n_key0).astype('int64'))
            minr_0 = min(rs_0)
            n_key0 -= len(rs_0)
            arr_key0 = key0_ds[minr_0:]
            arr_key0 = np.delete(arr_key0, rs_0 - minr_0)
            key0_ds.resize((n_key0,))
            key0_ds[minr_0:] = arr_key0
            arr_val = val_ds[minr_0:, :]
            val_shape = (val_shape[0] - len(rs_0), val_shape[1])
            val_ds.resize(val_shape)
            arr_val = np.delete(arr_val, rs_0 - minr_0, axis=0)
            val_ds[minr_0:, :] = arr_val
            # 2. delete from key1 and associated vals
            r_num_dels_1 = max(int(np.random.randn() + num_deletes_1), 1)
            pdf = scipy.stats.powerlaw.rvs(TestDatasetPerformanceBase.RECENCTNESS_POWERLAW_SHAPE, size=r_num_dels_1)
            rs_1 = np.unique((pdf * n_key1).astype('int64'))
            minr_1 = min(rs_1)
            n_key1 -= len(rs_1)
            arr_key1 = key1_ds[minr_1:]
            arr_key1 = np.delete(arr_key1, rs_1 - minr_1)
            key1_ds.resize((n_key1,))
            key1_ds[minr_1:] = arr_key1
            arr_val = val_ds[:, minr_1:]
            val_shape = (val_shape[0], val_shape[1] - len(rs_1))
            val_ds.resize(val_shape)
            arr_val = np.delete(arr_val, rs_1 - minr_1, axis=1)
            val_ds[:, minr_1:] = arr_val
        # insert rows
        if random.randrange(0, 100) <= pct_inserts:
            # insert into values in two steps
            # 1. insert into key0 and associated vals
            rand_num_inss_0 = max(int(np.random.randn() + num_inserts_0), 1)
            pdf = scipy.stats.powerlaw.rvs(TestDatasetPerformanceBase.RECENCTNESS_POWERLAW_SHAPE, size=rand_num_inss_0)
            rs_0 = np.unique((pdf * n_key0).astype('int64'))
            minr_0 = min(rs_0)
            arr_key0 = key0_ds[minr_0:]
            arr_key0 = np.insert(arr_key0, rs_0 - minr_0, np.random.randint(0, int(1e6), size=len(rs_0)))
            n_key0 += len(rs_0)
            key0_ds.resize((n_key0,))
            key0_ds[minr_0:] = arr_key0
            arr_val = val_ds[minr_0:, :]
            val_shape = (val_shape[0] + len(rs_0), val_shape[1])
            val_ds.resize(val_shape)
            arr_val = np.insert(arr_val, rs_0 - minr_0, np.random.rand(len(rs_0), n_key1), axis=0)
            val_ds[minr_0:, :] = arr_val
            # 2. insert into key1 and associated vals
            rand_num_inss_1 = max(int(np.random.randn() + num_inserts_1), 1)
            pdf = scipy.stats.powerlaw.rvs(TestDatasetPerformanceBase.RECENCTNESS_POWERLAW_SHAPE, size=rand_num_inss_1)
            rs_1 = np.unique((pdf * n_key1).astype('int64'))
            minr_1 = min(rs_1)
            arr_key1 = key1_ds[minr_1:]
            arr_key1 = np.insert(arr_key1, rs_1 - minr_1, np.random.randint(0, int(1e6), size=len(rs_1)))
            n_key1 += len(rs_1)
            key1_ds.resize((n_key1,))
            key1_ds[minr_1:] = arr_key1
            arr_val = val_ds[:, minr_1:]
            val_shape = (val_shape[0], val_shape[1] + len(rs_1))
            val_ds.resize(val_shape)
            arr_val = np.insert(arr_val, rs_1 - minr_1, np.random.rand(n_key0, len(rs_1)), axis=1)
            val_ds[:, minr_1:] = arr_val
        # append
        rand_num_apps_0 = int(np.random.randn() + num_rows_per_append_0)
        if rand_num_apps_0 > 0:
            # append to key0 and associated vals
            n_key0 += rand_num_apps_0
            key0_ds.resize((n_key0,))
            key0_ds[-rand_num_apps_0:] = np.random.randint(0, int(1e6), size=rand_num_apps_0)
            val_shape = (n_key0, n_key1)
            val_ds.resize(val_shape)
            val_ds[-rand_num_apps_0:, :] = np.random.rand(rand_num_apps_0, n_key1)
| {"/versioned_hdf5/replay.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/slicetools.py", "/versioned_hdf5/hashtable.py"], "/analysis/generate_data.py": ["/utils/__init__.py", "/versioned_hdf5/api.py"], "/versioned_hdf5/__init__.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/replay.py"], "/benchmarks/resize.py": ["/versioned_hdf5/__init__.py"], "/versioned_hdf5/tests/test_replay.py": ["/versioned_hdf5/replay.py", "/versioned_hdf5/hashtable.py"], "/versioned_hdf5/api.py": ["/versioned_hdf5/hashtable.py"], "/benchmarks/many_chunks.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/versionedhdf5file.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/delete_versions.py": ["/versioned_hdf5/__init__.py", "/versioned_hdf5/replay.py"], "/versioned_hdf5/tests/test_hashtable.py": ["/versioned_hdf5/hashtable.py", "/versioned_hdf5/tests/helpers.py", "/versioned_hdf5/__init__.py"], "/analysis/performance_tests.py": ["/versioned_hdf5/__init__.py"]} |
62,223 | ArvidJB/versioned-hdf5 | refs/heads/master | /benchmarks/resize.py | # Benchmarks from https://github.com/deshaw/versioned-hdf5/issues/155
import h5py
import numpy as np
from versioned_hdf5 import VersionedHDF5File
dt = np.dtype('double')
def time_resize():
    """Benchmark growing a versioned dataset across two staged versions."""
    # Stage an initial version containing a chunked 3-d dataset of zeros.
    with h5py.File('foo.h5', 'w') as h5file:
        vfile = VersionedHDF5File(h5file)
        with vfile.stage_version('0') as staged:
            staged.create_dataset('bar', (2, 15220, 2),
                                  chunks=(300, 100, 2),
                                  dtype=dt, data=np.full((2, 15220, 2), 0, dtype=dt))
    # Reopen and enlarge the dataset in a second version.
    with h5py.File('foo.h5', 'r+') as h5file:
        vfile = VersionedHDF5File(h5file)
        with vfile.stage_version('1') as staged:
            staged['bar'].resize((3, 15222, 2))
time_resize.timeout = 1200
# Pure HDF5 for comparison
def time_resize_hdf5():
    """Baseline: the same resize performed with plain h5py (no versioning)."""
    with h5py.File('foo.h5', 'w') as h5file:
        # maxshape=(None, None, None) makes the dataset resizable.
        h5file.create_dataset('bar', (2, 15220, 2),
                              chunks=(300, 100, 2),
                              dtype=dt,
                              data=np.full((2, 15220, 2), 0, dtype=dt),
                              maxshape=(None, None, None))
    with h5py.File('foo.h5', 'r+') as h5file:
        h5file['bar'].resize((3, 15222, 2))
def time_resize_and_write():
    """Benchmark 99 resize-then-write transactions on a versioned dataset."""
    with h5py.File('foo.h5', 'w') as h5file:
        vfile = VersionedHDF5File(h5file)
        with vfile.stage_version('0') as staged:
            staged.create_dataset('bar', (1, 10, 2),
                                  chunks=(600, 2, 4),
                                  dtype=dt, data=np.full((1, 10, 2), 0, dtype=dt))
    for i in range(1, 100):
        # Each iteration reopens the file, grows axis 1 by 10 and fills the
        # new slab with the iteration number.
        with h5py.File('foo.h5', 'r+') as h5file:
            vfile = VersionedHDF5File(h5file)
            with vfile.stage_version(str(i)) as staged:
                dataset = staged['bar']
                dataset.resize((1, (i + 1) * 10, 2))
                dataset[:, -10:, :] = np.full((1, 10, 2), i, dtype=dt)
time_resize_and_write.timeout = 1200
def time_resize_and_write_hdf5_no_copy():
    """Baseline: resize-then-write with plain h5py, mutating one dataset."""
    with h5py.File('foo.h5', 'w') as h5file:
        h5file.create_dataset('bar', (1, 10, 2),
                              chunks=(600, 2, 4),
                              dtype=dt, data=np.full((1, 10, 2), 0, dtype=dt),
                              maxshape=(None, None, None))
    for i in range(1, 100):
        with h5py.File('foo.h5', 'r+') as h5file:
            dataset = h5file['bar']
            dataset.resize((1, (i + 1) * 10, 2))
            dataset[:, -10:, :] = np.full((1, 10, 2), i, dtype=dt)
def time_resize_and_write_hdf5():
    """Baseline: plain h5py where every transaction copies into a new dataset,
    approximating what versioning has to do."""
    with h5py.File('foo.h5', 'w') as h5file:
        h5file.create_dataset('bar0', (1, 10, 2),
                              chunks=(600, 2, 4),
                              dtype=dt, data=np.full((1, 10, 2), 0, dtype=dt),
                              maxshape=(None, None, None))
    for i in range(1, 100):
        with h5py.File('foo.h5', 'r+') as h5file:
            # Copy the previous generation into a fresh dataset, then grow it.
            prev = h5file['bar%d' % (i-1)]
            dataset = h5file.create_dataset('bar%d' % i, chunks=(600, 2, 4),
                                            dtype=dt, data=prev,
                                            maxshape=(None, None, None))
            dataset.resize((1, (i + 1) * 10, 2))
            dataset[:, -10:, :] = np.full((1, 10, 2), i, dtype=dt)
| {"/versioned_hdf5/replay.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/slicetools.py", "/versioned_hdf5/hashtable.py"], "/analysis/generate_data.py": ["/utils/__init__.py", "/versioned_hdf5/api.py"], "/versioned_hdf5/__init__.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/replay.py"], "/benchmarks/resize.py": ["/versioned_hdf5/__init__.py"], "/versioned_hdf5/tests/test_replay.py": ["/versioned_hdf5/replay.py", "/versioned_hdf5/hashtable.py"], "/versioned_hdf5/api.py": ["/versioned_hdf5/hashtable.py"], "/benchmarks/many_chunks.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/versionedhdf5file.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/delete_versions.py": ["/versioned_hdf5/__init__.py", "/versioned_hdf5/replay.py"], "/versioned_hdf5/tests/test_hashtable.py": ["/versioned_hdf5/hashtable.py", "/versioned_hdf5/tests/helpers.py", "/versioned_hdf5/__init__.py"], "/analysis/performance_tests.py": ["/versioned_hdf5/__init__.py"]} |
62,224 | ArvidJB/versioned-hdf5 | refs/heads/master | /versioned_hdf5/tests/test_replay.py | import h5py
import numpy as np
from versioned_hdf5.replay import (modify_metadata, delete_version,
delete_versions, _recreate_raw_data,
_recreate_hashtable,
_recreate_virtual_dataset)
from versioned_hdf5.hashtable import Hashtable
def setup_vfile(file):
    """Populate `file` (a VersionedHDF5File) with the two fixture versions
    that check_data() later verifies."""
    with file.stage_version('version1') as v:
        # 'test_data' is all fill value (1.) except element 0.
        ds = v.create_dataset('test_data', data=None, fillvalue=1., shape=(10000,), chunks=(1000,))
        ds[0] = 0.
        v.create_dataset('test_data2', data=[1, 2, 3], chunks=(1000,))
        subgroup = v.create_group('group')
        subgroup.create_dataset('test_data4', data=[1, 2, 3, 4], chunks=(1000,))
    with file.stage_version('version2') as v:
        # version2 tweaks one element, adds a dataset, and edits the subgroup.
        v['test_data'][2000] = 2.
        v.create_dataset('test_data3', data=[1, 2, 3, 4], chunks=(1000,))
        v['group']['test_data4'][0] = 5
def check_data(file, test_data_fillvalue=1., version2=True, test_data4_fillvalue=0):
    """Assert that *file* still holds exactly what setup_vfile() wrote.

    Parameters
    ----------
    file : VersionedHDF5File
        The versioned file to inspect.
    test_data_fillvalue : float
        Expected fill value of the unwritten part of 'test_data' (tests
        that change the fill value via modify_metadata pass the new one).
    version2 : bool
        When False, only 'version1' is checked (used after 'version2'
        has been deleted).
    test_data4_fillvalue : int
        Expected fill value for any tail of 'group/test_data4'.
    """
    # 'test_data': element 0 was overwritten with 0., rest is fill value;
    # version2 additionally wrote 2. at index 2000.
    assert set(file['version1']) == {'test_data', 'test_data2', 'group'}
    assert file['version1']['test_data'].shape == (10000,)
    assert file['version1']['test_data'][0] == 0.
    assert np.all(file['version1']['test_data'][1:] == test_data_fillvalue)
    if version2:
        assert set(file['version2']) == {'test_data', 'test_data2',
                                         'test_data3', 'group'}
        assert file['version2']['test_data'].shape == (10000,)
        assert file['version2']['test_data'][0] == 0.
        assert np.all(file['version2']['test_data'][1:2000] == test_data_fillvalue)
        assert file['version2']['test_data'][2000] == 2.
        assert np.all(file['version2']['test_data'][2001:] == test_data_fillvalue)
    # 'test_data2' is identical in both versions.
    assert file['version1']['test_data2'].shape == (3,)
    assert np.all(file['version1']['test_data2'][:] == [1, 2, 3])
    if version2:
        assert file['version2']['test_data2'].shape == (3,)
        assert np.all(file['version2']['test_data2'][:] == [1, 2, 3])
    # 'test_data3' only exists from version2 onwards.
    assert 'test_data3' not in file['version1']
    if version2:
        assert file['version2']['test_data3'].shape == (4,)
        assert np.all(file['version2']['test_data3'][:] == [1, 2, 3, 4])
    # 'group/test_data4': version2 changed the first element to 5.
    assert set(file['version1']['group']) == {'test_data4'}
    assert file['version1']['group']['test_data4'].shape == (4,)
    np.testing.assert_equal(file['version1']['group']['test_data4'][:4],
                            [1, 2, 3, 4])
    assert np.all(file['version1']['group']['test_data4'][4:] == test_data4_fillvalue)
    if version2:
        assert set(file['version2']['group']) == {'test_data4'}
        assert file['version2']['group']['test_data4'].shape == (4,)
        np.testing.assert_equal(file['version2']['group']['test_data4'][:4],
                                [5, 2, 3, 4])
        assert np.all(file['version2']['group']['test_data4'][4:] == test_data4_fillvalue)
def test_modify_metadata_compression(vfile):
    """modify_metadata(compression=..., compression_opts=...) must change
    only the targeted dataset ('test_data2'), in every version and in the
    backing raw data, without changing any stored values."""
    setup_vfile(vfile)
    f = vfile.f
    # Before: nothing is compressed.
    assert vfile['version1']['test_data'].compression == None
    assert vfile['version2']['test_data'].compression == None
    assert vfile['version1']['test_data'].compression_opts == None
    assert vfile['version2']['test_data'].compression_opts == None
    assert vfile['version1']['test_data2'].compression == None
    assert vfile['version2']['test_data2'].compression == None
    assert vfile['version1']['test_data2'].compression_opts == None
    assert vfile['version2']['test_data2'].compression_opts == None
    assert vfile['version2']['test_data3'].compression == None
    assert vfile['version2']['test_data3'].compression_opts == None
    assert vfile['version1']['group']['test_data4'].compression == None
    assert vfile['version2']['group']['test_data4'].compression == None
    assert vfile['version1']['group']['test_data4'].compression_opts == None
    assert vfile['version2']['group']['test_data4'].compression_opts == None
    assert f['_version_data']['test_data']['raw_data'].compression == None
    assert f['_version_data']['test_data2']['raw_data'].compression == None
    assert f['_version_data']['test_data3']['raw_data'].compression == None
    assert f['_version_data']['group']['test_data4']['raw_data'].compression == None
    assert f['_version_data']['test_data']['raw_data'].compression_opts == None
    assert f['_version_data']['test_data2']['raw_data'].compression_opts == None
    assert f['_version_data']['test_data3']['raw_data'].compression_opts == None
    assert f['_version_data']['group']['test_data4']['raw_data'].compression_opts == None
    modify_metadata(f, 'test_data2', compression='gzip', compression_opts=3)
    # The data itself must be unchanged.
    check_data(vfile)
    # After: only test_data2 reports gzip/3, everywhere it appears.
    assert vfile['version1']['test_data'].compression == None
    assert vfile['version2']['test_data'].compression == None
    assert vfile['version1']['test_data'].compression_opts == None
    assert vfile['version2']['test_data'].compression_opts == None
    assert vfile['version1']['test_data2'].compression == 'gzip'
    assert vfile['version2']['test_data2'].compression == 'gzip'
    assert vfile['version1']['test_data2'].compression_opts == 3
    assert vfile['version2']['test_data2'].compression_opts == 3
    assert vfile['version2']['test_data3'].compression == None
    assert vfile['version2']['test_data3'].compression_opts == None
    assert vfile['version1']['group']['test_data4'].compression == None
    assert vfile['version2']['group']['test_data4'].compression == None
    assert vfile['version1']['group']['test_data4'].compression_opts == None
    assert vfile['version2']['group']['test_data4'].compression_opts == None
    assert f['_version_data']['test_data']['raw_data'].compression == None
    assert f['_version_data']['test_data2']['raw_data'].compression == 'gzip'
    assert f['_version_data']['test_data3']['raw_data'].compression == None
    assert f['_version_data']['group']['test_data4']['raw_data'].compression == None
    assert f['_version_data']['test_data']['raw_data'].compression_opts == None
    assert f['_version_data']['test_data2']['raw_data'].compression_opts == 3
    assert f['_version_data']['test_data3']['raw_data'].compression_opts == None
    assert f['_version_data']['group']['test_data4']['raw_data'].compression_opts == None
    # Make sure the temporary group used by modify_metadata has been destroyed.
    assert set(f['_version_data']) == {'test_data', 'test_data2',
                                       'test_data3', 'group', 'versions'}
    assert set(f['_version_data']['group']) == {'test_data4'}
def test_modify_metadata_compressio2(vfile):
    """Same as test_modify_metadata_compression, but targeting a dataset
    nested inside a group ('group/test_data4').

    NOTE(review): the name looks like a typo for
    'test_modify_metadata_compression2'; kept as-is to avoid churn.
    """
    setup_vfile(vfile)
    f = vfile.f
    # Before: nothing is compressed.
    assert vfile['version1']['test_data'].compression == None
    assert vfile['version2']['test_data'].compression == None
    assert vfile['version1']['test_data'].compression_opts == None
    assert vfile['version2']['test_data'].compression_opts == None
    assert vfile['version1']['test_data2'].compression == None
    assert vfile['version2']['test_data2'].compression == None
    assert vfile['version1']['test_data2'].compression_opts == None
    assert vfile['version2']['test_data2'].compression_opts == None
    assert vfile['version2']['test_data3'].compression == None
    assert vfile['version2']['test_data3'].compression_opts == None
    assert vfile['version1']['group']['test_data4'].compression == None
    assert vfile['version2']['group']['test_data4'].compression == None
    assert vfile['version1']['group']['test_data4'].compression_opts == None
    assert vfile['version2']['group']['test_data4'].compression_opts == None
    assert f['_version_data']['test_data']['raw_data'].compression == None
    assert f['_version_data']['test_data2']['raw_data'].compression == None
    assert f['_version_data']['test_data3']['raw_data'].compression == None
    assert f['_version_data']['group']['test_data4']['raw_data'].compression == None
    assert f['_version_data']['test_data']['raw_data'].compression_opts == None
    assert f['_version_data']['test_data2']['raw_data'].compression_opts == None
    assert f['_version_data']['test_data3']['raw_data'].compression_opts == None
    assert f['_version_data']['group']['test_data4']['raw_data'].compression_opts == None
    modify_metadata(f, 'group/test_data4', compression='gzip', compression_opts=3)
    # The data itself must be unchanged.
    check_data(vfile)
    # After: only group/test_data4 reports gzip/3, everywhere it appears.
    assert vfile['version1']['test_data'].compression == None
    assert vfile['version2']['test_data'].compression == None
    assert vfile['version1']['test_data'].compression_opts == None
    assert vfile['version2']['test_data'].compression_opts == None
    assert vfile['version1']['test_data2'].compression == None
    assert vfile['version2']['test_data2'].compression == None
    assert vfile['version1']['test_data2'].compression_opts == None
    assert vfile['version2']['test_data2'].compression_opts == None
    assert vfile['version2']['test_data3'].compression == None
    assert vfile['version2']['test_data3'].compression_opts == None
    assert vfile['version1']['group']['test_data4'].compression == 'gzip'
    assert vfile['version2']['group']['test_data4'].compression == 'gzip'
    assert vfile['version1']['group']['test_data4'].compression_opts == 3
    assert vfile['version2']['group']['test_data4'].compression_opts == 3
    assert f['_version_data']['test_data']['raw_data'].compression == None
    assert f['_version_data']['test_data2']['raw_data'].compression == None
    assert f['_version_data']['test_data3']['raw_data'].compression == None
    assert f['_version_data']['group']['test_data4']['raw_data'].compression == 'gzip'
    assert f['_version_data']['test_data']['raw_data'].compression_opts == None
    assert f['_version_data']['test_data2']['raw_data'].compression_opts == None
    assert f['_version_data']['test_data3']['raw_data'].compression_opts == None
    assert f['_version_data']['group']['test_data4']['raw_data'].compression_opts == 3
    # Make sure the temporary group used by modify_metadata has been destroyed.
    assert set(f['_version_data']) == {'test_data', 'test_data2',
                                       'test_data3', 'group', 'versions'}
    assert set(f['_version_data']['group']) == {'test_data4'}
def test_modify_metadata_chunks(vfile):
    """modify_metadata(chunks=...) must rechunk only the targeted dataset
    ('test_data2'), in every version and in the raw data, leaving the
    stored values unchanged."""
    setup_vfile(vfile)
    f = vfile.f
    # Before: every dataset uses the default (1000,) chunking.
    assert vfile['version1']['test_data'].chunks == (1000,)
    assert vfile['version2']['test_data'].chunks == (1000,)
    assert vfile['version1']['test_data2'].chunks == (1000,)
    assert vfile['version2']['test_data2'].chunks == (1000,)
    assert vfile['version2']['test_data3'].chunks == (1000,)
    assert vfile['version1']['group']['test_data4'].chunks == (1000,)
    assert vfile['version2']['group']['test_data4'].chunks == (1000,)
    assert f['_version_data']['test_data']['raw_data'].chunks == (1000,)
    assert f['_version_data']['test_data2']['raw_data'].chunks == (1000,)
    assert f['_version_data']['test_data3']['raw_data'].chunks == (1000,)
    assert f['_version_data']['group']['test_data4']['raw_data'].chunks == (1000,)
    modify_metadata(f, 'test_data2', chunks=(500,))
    # The data itself must be unchanged.
    check_data(vfile)
    # After: only test_data2 reports (500,).
    assert vfile['version1']['test_data'].chunks == (1000,)
    assert vfile['version2']['test_data'].chunks == (1000,)
    assert vfile['version1']['test_data2'].chunks == (500,)
    assert vfile['version2']['test_data2'].chunks == (500,)
    assert vfile['version2']['test_data3'].chunks == (1000,)
    assert vfile['version1']['group']['test_data4'].chunks == (1000,)
    assert vfile['version2']['group']['test_data4'].chunks == (1000,)
    assert f['_version_data']['test_data']['raw_data'].chunks == (1000,)
    assert f['_version_data']['test_data2']['raw_data'].chunks == (500,)
    assert f['_version_data']['test_data3']['raw_data'].chunks == (1000,)
    assert f['_version_data']['group']['test_data4']['raw_data'].chunks == (1000,)
    # Make sure the temporary group used by modify_metadata has been destroyed.
    assert set(f['_version_data']) == {'test_data', 'test_data2',
                                       'test_data3', 'group', 'versions'}
    assert set(f['_version_data']['group']) == {'test_data4'}
def test_modify_metadata_chunk2(vfile):
    """Same as test_modify_metadata_chunks, but targeting a dataset nested
    inside a group ('group/test_data4')."""
    setup_vfile(vfile)
    f = vfile.f
    # Before: every dataset uses the default (1000,) chunking.
    assert vfile['version1']['test_data'].chunks == (1000,)
    assert vfile['version2']['test_data'].chunks == (1000,)
    assert vfile['version1']['test_data2'].chunks == (1000,)
    assert vfile['version2']['test_data2'].chunks == (1000,)
    assert vfile['version2']['test_data3'].chunks == (1000,)
    assert vfile['version1']['group']['test_data4'].chunks == (1000,)
    assert vfile['version2']['group']['test_data4'].chunks == (1000,)
    assert f['_version_data']['test_data']['raw_data'].chunks == (1000,)
    assert f['_version_data']['test_data2']['raw_data'].chunks == (1000,)
    assert f['_version_data']['test_data3']['raw_data'].chunks == (1000,)
    assert f['_version_data']['group']['test_data4']['raw_data'].chunks == (1000,)
    modify_metadata(f, 'group/test_data4', chunks=(500,))
    # The data itself must be unchanged.
    check_data(vfile)
    # After: only group/test_data4 reports (500,).
    assert vfile['version1']['test_data'].chunks == (1000,)
    assert vfile['version2']['test_data'].chunks == (1000,)
    assert vfile['version1']['test_data2'].chunks == (1000,)
    assert vfile['version2']['test_data2'].chunks == (1000,)
    assert vfile['version2']['test_data3'].chunks == (1000,)
    assert vfile['version1']['group']['test_data4'].chunks == (500,)
    assert vfile['version2']['group']['test_data4'].chunks == (500,)
    assert f['_version_data']['test_data']['raw_data'].chunks == (1000,)
    assert f['_version_data']['test_data2']['raw_data'].chunks == (1000,)
    assert f['_version_data']['test_data3']['raw_data'].chunks == (1000,)
    assert f['_version_data']['group']['test_data4']['raw_data'].chunks == (500,)
    # Make sure the temporary group used by modify_metadata has been destroyed.
    assert set(f['_version_data']) == {'test_data', 'test_data2',
                                       'test_data3', 'group', 'versions'}
    assert set(f['_version_data']['group']) == {'test_data4'}
def test_modify_metadata_dtype(vfile):
    """modify_metadata(dtype=...) must retype only the targeted dataset
    ('test_data2'), in every version and in the raw data, leaving the
    stored values unchanged."""
    setup_vfile(vfile)
    f = vfile.f
    # Before: test_data is float64, everything else int64.
    assert vfile['version1']['test_data'].dtype == np.float64
    assert vfile['version2']['test_data'].dtype == np.float64
    assert vfile['version1']['test_data2'].dtype == np.int64
    assert vfile['version2']['test_data2'].dtype == np.int64
    assert vfile['version2']['test_data3'].dtype == np.int64
    assert vfile['version1']['group']['test_data4'].dtype == np.int64
    assert vfile['version2']['group']['test_data4'].dtype == np.int64
    assert f['_version_data']['test_data']['raw_data'].dtype == np.float64
    assert f['_version_data']['test_data2']['raw_data'].dtype == np.int64
    assert f['_version_data']['test_data3']['raw_data'].dtype == np.int64
    assert f['_version_data']['group']['test_data4']['raw_data'].dtype == np.int64
    modify_metadata(f, 'test_data2', dtype=np.float64)
    # The data itself must be unchanged.
    check_data(vfile)
    # After: only test_data2 became float64.
    assert vfile['version1']['test_data'].dtype == np.float64
    assert vfile['version2']['test_data'].dtype == np.float64
    assert vfile['version1']['test_data2'].dtype == np.float64
    assert vfile['version2']['test_data2'].dtype == np.float64
    assert vfile['version2']['test_data3'].dtype == np.int64
    assert vfile['version1']['group']['test_data4'].dtype == np.int64
    assert vfile['version2']['group']['test_data4'].dtype == np.int64
    assert f['_version_data']['test_data']['raw_data'].dtype == np.float64
    assert f['_version_data']['test_data2']['raw_data'].dtype == np.float64
    assert f['_version_data']['test_data3']['raw_data'].dtype == np.int64
    assert f['_version_data']['group']['test_data4']['raw_data'].dtype == np.int64
    # Make sure the temporary group used by modify_metadata has been destroyed.
    assert set(f['_version_data']) == {'test_data', 'test_data2',
                                       'test_data3', 'group', 'versions'}
    assert set(f['_version_data']['group']) == {'test_data4'}
def test_modify_metadata_dtype2(vfile):
    """Same as test_modify_metadata_dtype, but targeting a dataset nested
    inside a group ('group/test_data4')."""
    setup_vfile(vfile)
    f = vfile.f
    # Before: test_data is float64, everything else int64.
    assert vfile['version1']['test_data'].dtype == np.float64
    assert vfile['version2']['test_data'].dtype == np.float64
    assert vfile['version1']['test_data2'].dtype == np.int64
    assert vfile['version2']['test_data2'].dtype == np.int64
    assert vfile['version2']['test_data3'].dtype == np.int64
    assert vfile['version1']['group']['test_data4'].dtype == np.int64
    assert vfile['version2']['group']['test_data4'].dtype == np.int64
    assert f['_version_data']['test_data']['raw_data'].dtype == np.float64
    assert f['_version_data']['test_data2']['raw_data'].dtype == np.int64
    assert f['_version_data']['test_data3']['raw_data'].dtype == np.int64
    assert f['_version_data']['group']['test_data4']['raw_data'].dtype == np.int64
    modify_metadata(f, 'group/test_data4', dtype=np.float64)
    # The data itself must be unchanged.
    check_data(vfile)
    # After: only group/test_data4 became float64.
    assert vfile['version1']['test_data'].dtype == np.float64
    assert vfile['version2']['test_data'].dtype == np.float64
    assert vfile['version1']['test_data2'].dtype == np.int64
    assert vfile['version2']['test_data2'].dtype == np.int64
    assert vfile['version2']['test_data3'].dtype == np.int64
    assert vfile['version1']['group']['test_data4'].dtype == np.float64
    assert vfile['version2']['group']['test_data4'].dtype == np.float64
    assert f['_version_data']['test_data']['raw_data'].dtype == np.float64
    assert f['_version_data']['test_data2']['raw_data'].dtype == np.int64
    assert f['_version_data']['test_data3']['raw_data'].dtype == np.int64
    assert f['_version_data']['group']['test_data4']['raw_data'].dtype == np.float64
    # Make sure the temporary group used by modify_metadata has been destroyed.
    assert set(f['_version_data']) == {'test_data', 'test_data2',
                                       'test_data3', 'group', 'versions'}
    assert set(f['_version_data']['group']) == {'test_data4'}
def test_modify_metadata_fillvalue1(vfile):
    """modify_metadata(fillvalue=...) on 'test_data' must change its fill
    value everywhere it appears and nowhere else; the unwritten portion of
    the dataset then reads back as the new fill value."""
    setup_vfile(vfile)
    f = vfile.f
    # Before: test_data has fillvalue 1., everything else 0.
    assert vfile['version1']['test_data'].fillvalue == 1.
    assert vfile['version2']['test_data'].fillvalue == 1.
    assert vfile['version1']['test_data2'].fillvalue == 0
    assert vfile['version2']['test_data2'].fillvalue == 0
    assert vfile['version2']['test_data3'].fillvalue == 0
    assert vfile['version1']['group']['test_data4'].fillvalue == 0
    assert vfile['version2']['group']['test_data4'].fillvalue == 0
    assert f['_version_data']['test_data']['raw_data'].fillvalue == 1.
    assert f['_version_data']['test_data2']['raw_data'].fillvalue == 0
    assert f['_version_data']['test_data3']['raw_data'].fillvalue == 0
    assert f['_version_data']['group']['test_data4']['raw_data'].fillvalue == 0
    modify_metadata(f, 'test_data', fillvalue=3.)
    # Unwritten elements of test_data now read back as 3.
    check_data(vfile, test_data_fillvalue=3.)
    assert vfile['version1']['test_data'].fillvalue == 3.
    assert vfile['version2']['test_data'].fillvalue == 3.
    assert vfile['version1']['test_data2'].fillvalue == 0
    assert vfile['version2']['test_data2'].fillvalue == 0
    assert vfile['version2']['test_data3'].fillvalue == 0
    assert vfile['version1']['group']['test_data4'].fillvalue == 0
    assert vfile['version2']['group']['test_data4'].fillvalue == 0
    assert f['_version_data']['test_data']['raw_data'].fillvalue == 3.
    assert f['_version_data']['test_data2']['raw_data'].fillvalue == 0
    assert f['_version_data']['test_data3']['raw_data'].fillvalue == 0
    assert f['_version_data']['group']['test_data4']['raw_data'].fillvalue == 0
    # Make sure the temporary group used by modify_metadata has been destroyed.
    assert set(f['_version_data']) == {'test_data', 'test_data2',
                                       'test_data3', 'group', 'versions'}
    assert set(f['_version_data']['group']) == {'test_data4'}
def test_modify_metadata_fillvalue2(vfile):
    """modify_metadata(fillvalue=...) on 'test_data2' must change its fill
    value everywhere it appears and nowhere else.  test_data2 is fully
    written, so the stored data is unaffected."""
    setup_vfile(vfile)
    f = vfile.f
    # Before: test_data has fillvalue 1., everything else 0.
    assert vfile['version1']['test_data'].fillvalue == 1.
    assert vfile['version2']['test_data'].fillvalue == 1.
    assert vfile['version1']['test_data2'].fillvalue == 0
    assert vfile['version2']['test_data2'].fillvalue == 0
    assert vfile['version2']['test_data3'].fillvalue == 0
    assert vfile['version1']['group']['test_data4'].fillvalue == 0
    assert vfile['version2']['group']['test_data4'].fillvalue == 0
    assert f['_version_data']['test_data']['raw_data'].fillvalue == 1.
    assert f['_version_data']['test_data2']['raw_data'].fillvalue == 0
    assert f['_version_data']['test_data3']['raw_data'].fillvalue == 0
    assert f['_version_data']['group']['test_data4']['raw_data'].fillvalue == 0
    modify_metadata(f, 'test_data2', fillvalue=3)
    # The data itself must be unchanged.
    check_data(vfile)
    assert vfile['version1']['test_data'].fillvalue == 1.
    assert vfile['version2']['test_data'].fillvalue == 1.
    assert vfile['version1']['test_data2'].fillvalue == 3
    assert vfile['version2']['test_data2'].fillvalue == 3
    assert vfile['version2']['test_data3'].fillvalue == 0
    assert vfile['version1']['group']['test_data4'].fillvalue == 0
    assert vfile['version2']['group']['test_data4'].fillvalue == 0
    assert f['_version_data']['test_data']['raw_data'].fillvalue == 1.
    assert f['_version_data']['test_data2']['raw_data'].fillvalue == 3
    assert f['_version_data']['test_data3']['raw_data'].fillvalue == 0
    assert f['_version_data']['group']['test_data4']['raw_data'].fillvalue == 0
    # Make sure the temporary group used by modify_metadata has been destroyed.
    assert set(f['_version_data']) == {'test_data', 'test_data2',
                                       'test_data3', 'group', 'versions'}
    assert set(f['_version_data']['group']) == {'test_data4'}
def test_modify_metadata_fillvalue3(vfile):
    """modify_metadata(fillvalue=...) on a dataset nested inside a group
    ('group/test_data4') must change its fill value everywhere it appears
    and nowhere else."""
    setup_vfile(vfile)
    f = vfile.f
    # Before: test_data has fillvalue 1., everything else 0.
    assert vfile['version1']['test_data'].fillvalue == 1.
    assert vfile['version2']['test_data'].fillvalue == 1.
    assert vfile['version1']['test_data2'].fillvalue == 0
    assert vfile['version2']['test_data2'].fillvalue == 0
    assert vfile['version2']['test_data3'].fillvalue == 0
    assert vfile['version1']['group']['test_data4'].fillvalue == 0
    assert vfile['version2']['group']['test_data4'].fillvalue == 0
    assert f['_version_data']['test_data']['raw_data'].fillvalue == 1.
    assert f['_version_data']['test_data2']['raw_data'].fillvalue == 0
    assert f['_version_data']['test_data3']['raw_data'].fillvalue == 0
    assert f['_version_data']['group']['test_data4']['raw_data'].fillvalue == 0
    modify_metadata(f, 'group/test_data4', fillvalue=2)
    # test_data4 is fully written (shape (4,)), so the data is unchanged.
    check_data(vfile)
    assert vfile['version1']['test_data'].fillvalue == 1.
    assert vfile['version2']['test_data'].fillvalue == 1.
    assert vfile['version1']['test_data2'].fillvalue == 0
    assert vfile['version2']['test_data2'].fillvalue == 0
    assert vfile['version2']['test_data3'].fillvalue == 0
    assert vfile['version1']['group']['test_data4'].fillvalue == 2
    assert vfile['version2']['group']['test_data4'].fillvalue == 2
    assert f['_version_data']['test_data']['raw_data'].fillvalue == 1.
    assert f['_version_data']['test_data2']['raw_data'].fillvalue == 0
    assert f['_version_data']['test_data3']['raw_data'].fillvalue == 0
    assert f['_version_data']['group']['test_data4']['raw_data'].fillvalue == 2
    # Make sure the temporary group used by modify_metadata has been destroyed.
    assert set(f['_version_data']) == {'test_data', 'test_data2',
                                       'test_data3', 'group', 'versions'}
    assert set(f['_version_data']['group']) == {'test_data4'}
def test_delete_version(vfile):
    """Deleting 'version2' must keep 'version1' intact and drop the chunks
    that only 'version2' referenced."""
    setup_vfile(vfile)
    h5file = vfile.f
    delete_version(h5file, 'version2')
    check_data(vfile, version2=False)
    assert list(vfile) == ['version1']
    version_data = h5file['_version_data']
    assert set(version_data) == {'group', 'test_data', 'test_data2', 'versions'}
    assert set(version_data['group']) == {'test_data4'}
    # Values written only in version2 must be gone from the raw data.
    assert not np.isin(2., version_data['test_data']['raw_data'][:])
    assert not np.isin(5, version_data['group']['test_data4']['raw_data'][:])
def test_delete_versions(vfile):
    """Deleting several versions at once must keep the surviving version's
    data and drop all chunks referenced only by the deleted versions."""
    setup_vfile(vfile)
    with vfile.stage_version('version3') as sv:
        sv['test_data'][2000] = 3.
        sv.create_dataset('test_data4', data=[1, 2, 3, 4], chunks=(1000,))
    h5file = vfile.f
    delete_versions(h5file, ['version2', 'version3'])
    check_data(vfile, version2=False)
    assert list(vfile) == ['version1']
    version_data = h5file['_version_data']
    # The top-level 'test_data4' existed only in version3, so it is gone.
    assert set(version_data) == {'group', 'test_data', 'test_data2', 'versions'}
    assert set(version_data['group']) == {'test_data4'}
    # Values written only in the deleted versions are gone from the raw data.
    assert not np.isin(2., version_data['test_data']['raw_data'][:])
    assert not np.isin(5, version_data['group']['test_data4']['raw_data'][:])
def test_delete_versions_no_data(vfile):
    """Deleting a later version must not disturb a dataset that, in the
    surviving version, only ever held its fill value."""
    with vfile.stage_version('version1') as sv:
        sv.create_dataset('data', maxshape=(None, None), chunks=(20, 20),
                          shape=(5, 5), dtype=np.dtype('int8'), fillvalue=0)
    with vfile.stage_version('version2') as sv:
        sv['data'][0] = 1
    delete_versions(vfile.f, ['version2'])
    assert list(vfile) == ['version1']
    remaining = vfile['version1']
    assert list(remaining) == ['data']
    assert remaining['data'].shape == (5, 5)
    assert np.all(remaining['data'][:] == 0)
def test_delete_versions_no_data2(vfile):
    """Deleting the first version must keep the later version's data,
    including the portion that was never explicitly written."""
    with vfile.stage_version('version1') as sv:
        sv.create_dataset('data', maxshape=(None, None), chunks=(20, 20),
                          shape=(5, 5), dtype=np.dtype('int8'), fillvalue=0)
    with vfile.stage_version('version2') as sv:
        sv['data'][0] = 1
    delete_versions(vfile.f, ['version1'])
    assert list(vfile) == ['version2']
    remaining = vfile['version2']
    assert list(remaining) == ['data']
    assert remaining['data'].shape == (5, 5)
    # Row 0 was written in version2; the rest is still fill value.
    assert np.all(remaining['data'][0] == 1)
    assert np.all(remaining['data'][1:] == 0)
def test_delete_versions_nested_groups(vfile):
    """Deleting intermediate versions must leave a dataset nested two
    groups deep intact in every surviving version."""
    expected = []
    with vfile.stage_version('r0') as sv:
        nested = sv.create_group('group1/group2')
        expected.append(np.arange(500))
        nested.create_dataset('test_data', maxshape=(None,), chunks=(1000),
                              data=expected[0])
    for i in range(1, 11):
        with vfile.stage_version(f'r{i}') as sv:
            expected.append(np.random.randint(0, 1000, size=500))
            sv['group1']['group2']['test_data'][:] = expected[-1]

    def check_version(i):
        # The whole group hierarchy and the data must match what was staged.
        version = vfile[f'r{i}']
        assert list(version) == ['group1'], i
        assert list(version['group1']) == ['group2']
        assert list(version['group1']['group2']) == ['test_data']
        np.testing.assert_equal(version['group1']['group2']['test_data'][:],
                                expected[i])

    assert set(vfile) == {f'r{i}' for i in range(11)}
    for i in range(11):
        check_version(i)
    delete_versions(vfile, ['r3', 'r6'])
    assert set(vfile) == {f'r{i}' for i in range(11)} - {'r3', 'r6'}
    for i in range(11):
        if i not in (3, 6):
            check_version(i)
def test_delete_versions_prev_version(vfile):
    """After deletion, each survivor's prev_version pointer must skip over
    the removed versions and point at the nearest surviving ancestor."""
    with vfile.stage_version('r0') as g:
        g['foo'] = np.array([1, 2, 3])
    for i in range(1, 11):
        with vfile.stage_version(f'r{i}') as g:
            g['foo'][:] = np.array([1, i, 3])
    delete_versions(vfile, ['r1', 'r5', 'r8'])
    # The surviving chain, oldest first; each entry's predecessor is the
    # one before it in this list.
    chain = ['__first_version__', 'r0', 'r2', 'r3', 'r4', 'r6', 'r7', 'r9', 'r10']
    prev_versions = dict(zip(chain, [None] + chain[:-1]))
    for v in vfile:
        assert vfile[v].attrs['prev_version'] == prev_versions[v]
def setup2(vfile):
    """Create 'test_data' (1000x20, chunked (101, 11)) holding arange data
    in version1, then negate every 200th row in version2."""
    with vfile.stage_version('version1') as sv:
        sv.create_dataset('test_data',
                          data=np.arange(20000).reshape((1000, 20)),
                          chunks=(101, 11))
    with vfile.stage_version('version2') as sv:
        sv['test_data'][::200] = -sv['test_data'][::200]
def test_recreate_raw_data(vfile):
    """_recreate_raw_data must build a '_tmp_raw_data' dataset holding the
    same chunk contents, with the chunks_map relating old to new slices."""
    setup2(vfile)
    chunks_map = _recreate_raw_data(vfile.f, 'test_data', ['version1'], tmp=True)
    assert len(chunks_map) == 20
    raw = vfile.f['_version_data/test_data/raw_data']
    tmp_raw = vfile.f['_version_data/test_data/_tmp_raw_data']
    # The rebuilt raw data is smaller: chunks only needed by the deleted
    # version are not carried over.
    assert raw.shape == (3030, 11)
    assert tmp_raw.shape == (2020, 11)
    # Every relocated chunk must be copied verbatim.
    for old_slice, new_slice in chunks_map.items():
        old_chunk = raw[old_slice.raw]
        new_chunk = tmp_raw[new_slice.raw]
        assert old_chunk.shape == new_chunk.shape
        np.testing.assert_equal(old_chunk, new_chunk)
def test_recreate_hashtable(vfile):
    """_recreate_hashtable must produce a hash table equivalent to the one
    obtained by writing the surviving data from scratch."""
    setup2(vfile)
    chunks_map = _recreate_raw_data(vfile.f, 'test_data', ['version1'], tmp=False)
    # Recreate a separate, independent version, with the dataset as it would
    # be with version1 deleted.  prev_version='' makes it a root version.
    with vfile.stage_version('version2_2', prev_version='') as g:
        g.create_dataset('test_data2',
                         data=np.arange(20000).reshape((1000, 20)),
                         chunks=(101,11))
        g['test_data2'][::200] = -g['test_data2'][::200]
    _recreate_hashtable(vfile.f, 'test_data', chunks_map, tmp=True)
    new_hash_table = Hashtable(vfile.f, 'test_data',
                               hash_table_name='_tmp_hash_table')
    new_hash_table2 = Hashtable(vfile.f, 'test_data2')
    d1 = dict(new_hash_table)
    d2 = dict(new_hash_table2)
    # Identical chunk contents must hash identically.
    assert d1.keys() == d2.keys()
    # The exact slices won't be the same because raw data won't be in the same
    # order
    for h in d1:
        np.testing.assert_equal(
            vfile.f['_version_data/test_data/raw_data'][d1[h].raw],
            vfile.f['_version_data/test_data2/raw_data'][d2[h].raw],
        )
def test_recreate_virtual_dataset(vfile):
    """The rebuilt ('_tmp_') virtual dataset must expose exactly the same
    data as the original virtual dataset for the surviving version."""
    setup2(vfile)
    before = vfile.f['_version_data/versions/version2/test_data'][:]
    chunks_map = _recreate_raw_data(vfile.f, 'test_data', ['version1'], tmp=False)
    _recreate_hashtable(vfile.f, 'test_data', chunks_map, tmp=False)
    _recreate_virtual_dataset(vfile.f, 'test_data', ['version2'], chunks_map, tmp=True)
    after = vfile.f['_version_data/versions/version2/_tmp_test_data'][:]
    np.testing.assert_equal(before, after)
def test_delete_versions2(vfile):
    """After deleting version1, version2 must be unchanged and the raw data
    must contain exactly the values version2 references."""
    setup2(vfile)
    expected = np.arange(20000).reshape((1000, 20))
    expected[::200] = -expected[::200]
    assert vfile['version2']['test_data'].shape == expected.shape
    delete_versions(vfile, ['version1'])
    assert list(vfile) == ['version2']
    assert list(vfile['version2']) == ['test_data']
    survivor = vfile['version2']['test_data']
    assert survivor.shape == expected.shape
    np.testing.assert_equal(survivor[:], expected)
    assert set(vfile.f['_version_data/test_data/raw_data'][:].flat) == set(expected.flat)
def test_delete_versions_variable_length_strings(vfile):
    """delete_versions must cope with a variable-length string dataset.

    Each version appends one more 'foo' to 'bar'; deleting intermediate
    versions must not corrupt the surviving ones.
    """
    with vfile.stage_version('r0') as sv:
        data = np.array(['foo'], dtype='O')
        sv.create_dataset('bar', data=data, dtype=h5py.string_dtype(encoding='ascii'))
    # Use f-strings for consistency with the rest of this module.
    for i in range(1, 11):
        with vfile.stage_version(f'r{i}') as sv:
            sv['bar'].resize((i+1,))
            sv['bar'][i] = 'foo'
    delete_versions(vfile, ['r2', 'r4', 'r6'])
    # The original test only checked that this does not crash; also verify
    # the surviving versions still have the expected shapes.
    assert set(vfile) == {'r0', 'r1', 'r3', 'r5', 'r7', 'r8', 'r9', 'r10'}
    for i in [0, 1, 3, 5, 7, 8, 9, 10]:
        assert vfile[f'r{i}']['bar'].shape == (i+1,)
def test_delete_versions_fillvalue_only_dataset(vfile):
    """Deleting the version that created a fillvalue-only (never written)
    dataset must leave that dataset usable both in the surviving version
    and in versions staged afterwards."""
    with vfile.stage_version('r0') as sv:
        # data=None: the dataset is backed purely by its fill value.
        sv.create_dataset('fillvalue_only', shape=(6,),
                          dtype=np.dtype('int64'), data=None,
                          maxshape=(None,), chunks=(10000,), fillvalue=0)
        sv.create_dataset('has_data', shape=(6,), dtype=np.dtype('int64'),
                          data=np.arange(6), maxshape=(None,),
                          chunks=(10000,), fillvalue=0)
    with vfile.stage_version('r1') as sv:
        sv['has_data'] = np.arange(5, -1, -1)
    delete_versions(vfile, ['r0'])
    # Writing to the fillvalue-only dataset must still work after deletion.
    with vfile.stage_version('r2') as sv:
        sv['fillvalue_only'][0] = 1
    assert set(vfile) == {'r1', 'r2'}
    assert set(vfile['r1']) == {'fillvalue_only', 'has_data'}
    assert set(vfile['r2']) == {'fillvalue_only', 'has_data'}
    np.testing.assert_equal(vfile['r1']['fillvalue_only'][:], 0)
    np.testing.assert_equal(vfile['r2']['fillvalue_only'][:],
                            np.array([1, 0, 0, 0, 0, 0]))
    np.testing.assert_equal(vfile['r1']['has_data'][:], np.arange(5, -1, -1))
    np.testing.assert_equal(vfile['r2']['has_data'][:], np.arange(5, -1, -1))
def test_delete_versions_current_version(vfile):
    """When the current version is among the deleted ones, current_version
    must fall back to the newest surviving version."""
    with vfile.stage_version('r0') as sv:
        sv.create_dataset('bar', data=np.arange(10))
    # Use f-strings for consistency with the rest of this module.
    for i in range(1, 11):
        with vfile.stage_version(f'r{i}') as sv:
            sv['bar'] = np.arange(10 + i)
    delete_versions(vfile, ['r2', 'r4', 'r6', 'r8', 'r9', 'r10'])
    cv = vfile.current_version
    # r8-r10 were deleted, so the newest surviving version is r7.
    assert cv == 'r7'
    np.testing.assert_equal(vfile[cv]['bar'][:], np.arange(17))
def test_variable_length_strings(vfile):
    """delete_versions must handle a variable-length string dataset nested
    inside a group.

    Warning: this test will segfault with h5py 3.7.0
    (https://github.com/h5py/h5py/pull/2111 fixes it)
    """
    with vfile.stage_version('r0') as sv:
        data_group = sv.create_group('data')
        str_dtype = h5py.string_dtype(encoding='ascii')
        data_group.create_dataset('foo', data=['foo', 'bar'], dtype=str_dtype)
    for i in range(1, 7):
        with vfile.stage_version(f'r{i}') as sv:
            sv['data/foo'] = np.array([f'foo{i}', f'bar{i}'], dtype='O')
    delete_versions(vfile, ['r1'])
def test_delete_empty_dataset(vfile):
    """Test that deleting an empty dataset executes successfully."""
    def check_sizes():
        # Raw data is always one full chunk of fill value, while the
        # current version's dataset itself stays empty.
        assert vfile.f['_version_data/key0/raw_data'][:].size == 10000
        assert vfile[vfile.current_version]['key0'][:].size == 0

    with vfile.stage_version("r0") as sv:
        sv.create_dataset(
            "key0",
            data=np.array([]),
            maxshape=(None,),
            chunks=(10000,),
            compression="lzf",
        )
    check_sizes()
    # Create a new version, checking the sizes again.
    with vfile.stage_version("r1") as sv:
        sv["key0"].resize((0,))
    check_sizes()
    # Deleting a prior version should not change the data in the current version.
    delete_versions(vfile, ["r0"])
    check_sizes()
    # Create a new version, then check if the data is the correct size.
    with vfile.stage_version("r2") as sv:
        sv["key0"].resize((0,))
    check_sizes()
| {"/versioned_hdf5/replay.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/slicetools.py", "/versioned_hdf5/hashtable.py"], "/analysis/generate_data.py": ["/utils/__init__.py", "/versioned_hdf5/api.py"], "/versioned_hdf5/__init__.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/replay.py"], "/benchmarks/resize.py": ["/versioned_hdf5/__init__.py"], "/versioned_hdf5/tests/test_replay.py": ["/versioned_hdf5/replay.py", "/versioned_hdf5/hashtable.py"], "/versioned_hdf5/api.py": ["/versioned_hdf5/hashtable.py"], "/benchmarks/many_chunks.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/versionedhdf5file.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/delete_versions.py": ["/versioned_hdf5/__init__.py", "/versioned_hdf5/replay.py"], "/versioned_hdf5/tests/test_hashtable.py": ["/versioned_hdf5/hashtable.py", "/versioned_hdf5/tests/helpers.py", "/versioned_hdf5/__init__.py"], "/analysis/performance_tests.py": ["/versioned_hdf5/__init__.py"]} |
62,225 | ArvidJB/versioned-hdf5 | refs/heads/master | /versioned_hdf5/api.py | """
Public API functions
Everything outside of this file is considered internal API and is subject to
change.
"""
import logging
import numpy as np
from typing import Set, Optional
from contextlib import contextmanager
import datetime
from .backend import initialize
from .versions import (create_version_group, commit_version,
get_version_by_timestamp, get_nth_previous_version,
set_current_version, all_versions, delete_version, )
from .wrappers import InMemoryGroup
from .hashtable import Hashtable
logger = logging.getLogger(__name__)
class VersionedHDF5File:
    """
    A Versioned HDF5 File
    This is the main entry-point of the library. To use a versioned HDF5 file,
    pass a h5py file to constructor. The methods on the resulting object can
    be used to view and create versions.
    Note that versioned HDF5 files have a special structure and should not be
    modified directly. Also note that once a version is created in the file,
    it should be treated as read-only. Some protections are in place to
    prevent accidental modification, but it is not possible in the HDF5 layer
    to make a dataset or group read-only, so modifications made outside of
    this library could result in breaking things.
    >>> import h5py
    >>> f = h5py.File('file.h5') # doctest: +SKIP
    >>> from versioned_hdf5 import VersionedHDF5File
    >>> file = VersionedHDF5File(f) # doctest: +SKIP
    Access versions using indexing
    >>> version1 = file['version1'] # doctest: +SKIP
    This returns a group containing the datasets for that version.
    To create a new version, use :func:`stage_version`.
    >>> with file.stage_version('version2') as group: # doctest: +SKIP
    ... group['dataset'] = ... # Modify the group
    ...
    When the context manager exits, the version will be written to the file.
    Finally, use
    >>> file.close() # doctest: +SKIP
    to close the `VersionedHDF5File` object (note that the `h5py` file object
    should be closed separately.)
    """
    def __init__(self, f):
        # Keep a reference to the underlying h5py File and lay down the
        # '_version_data' bookkeeping structure on first use of the file.
        self.f = f
        if '_version_data' not in f:
            initialize(f)
        self._version_data = f['_version_data']
        self._versions = self._version_data['versions']
        self._closed = False
        # Cache of version lookups keyed by integer offset or timestamp;
        # cleared whenever the current version or version set changes.
        self._version_cache = {}
    @property
    def closed(self):
        """Whether this object, or the h5py File underneath it, is closed."""
        if self._closed:
            return self._closed
        # A falsy h5py file id means the File was closed out from under us;
        # latch our own closed flag so later checks are cheap.
        if not self.f.id:
            self._closed = True
        return self._closed
    @property
    def current_version(self):
        """
        The current version.
        The current version is used as the default previous version to
        :func:`stage_version`, and is also used for negative integer version
        indexing (the current version is `self[0]`).
        """
        return self._versions.attrs['current_version']
    @current_version.setter
    def current_version(self, version_name):
        # Changing the current version shifts the meaning of integer
        # indexing, so the lookup cache must be invalidated.
        set_current_version(self.f, version_name)
        self._version_cache.clear()
    def get_version_by_name(self, version):
        """Return the committed version group named *version*.

        The empty string is an alias for the internal first version.
        Raises KeyError for unknown names and ValueError for uncommitted
        groups or names starting with '/'.
        """
        if version.startswith('/'):
            raise ValueError("Versions cannot start with '/'. VersionedHDF5File should not be used to access the top-level of an h5py File.")
        if version == '':
            version = '__first_version__'
        if version not in self._versions:
            raise KeyError(f"Version {version!r} not found")
        g = self._versions[version]
        if not g.attrs['committed']:
            raise ValueError("Version groups cannot accessed from the VersionedHDF5File object before they are committed.")
        # Read-only files get the raw h5py group; writable files get the
        # in-memory wrapper (marked committed so it rejects modification).
        if self.f.file.mode == 'r':
            return g
        return InMemoryGroup(g._id, _committed=True)
    def get_version_by_timestamp(self, timestamp, exact=False):
        """Return the version group for *timestamp* (see
        versions.get_version_by_timestamp for matching semantics; *exact*
        requires an exact timestamp match)."""
        version = get_version_by_timestamp(self.f, timestamp, exact=exact)
        g = self._versions[version]
        if not g.attrs['committed']:
            raise ValueError("Version groups cannot accessed from the VersionedHDF5File object before they are committed.")
        if self.f.file.mode == 'r':
            return g
        return InMemoryGroup(g._id, _committed=True)
    def __getitem__(self, item):
        """Look up a version by name (str), relative offset (non-positive
        int, 0 = current), timestamp (datetime/np.datetime64), or None for
        the current version."""
        if self.closed:
            raise ValueError("File is closed")
        if item in self._version_cache:
            # We don't cache version names because those are already cheap to
            # lookup.
            return self._version_cache[item]
        if item is None:
            return self.get_version_by_name(self.current_version)
        elif isinstance(item, str):
            return self.get_version_by_name(item)
        elif isinstance(item, (int, np.integer)):
            if item > 0:
                raise IndexError("Integer version slice must be negative")
            self._version_cache[item] = self.get_version_by_name(get_nth_previous_version(self.f,
                self.current_version, -item))
            return self._version_cache[item]
        elif isinstance(item, (datetime.datetime, np.datetime64)):
            self._version_cache[item] = self.get_version_by_timestamp(item)
            return self._version_cache[item]
        else:
            raise KeyError(f"Don't know how to get the version for {item!r}")
    def __delitem__(self, item):
        """
        Delete a version
        If the version is the current version, the new current version will be
        set to the previous version.
        """
        if not isinstance(item, str):
            raise NotImplementedError("del is only supported for string keys")
        if item not in self._versions:
            raise KeyError(item)
        # If we are deleting the current version, fall back to its recorded
        # previous version as the new current one.
        new_current = self.current_version if item != self.current_version else self[item].attrs['prev_version']
        delete_version(self.f, item, new_current)
        self._version_cache.clear()
    def __iter__(self):
        # Iterates version names, excluding the internal first version.
        return all_versions(self.f, include_first=False)
    @contextmanager
    def stage_version(self, version_name: str, prev_version=None,
                      make_current=True, timestamp=None):
        """
        Return a context manager to stage a new version
        The context manager returns a group, which should be modified in-place
        to build the new version. When the context manager exits, the new
        version will be written into the file.
        `version_name` should be the name for the version.
        `prev_version` should be the previous version which this version is
        based on. The group returned by the context manager will mirror this
        previous version. If it is `None` (the default), the previous
        version will be the current version. If it is `''`, there will be no
        previous version.
        If `make_current` is `True` (the default), the new version will be set
        as the current version. The current version is used as the default
        `prev_version` for any future `stage_version` call.
        `timestamp` may be a datetime.datetime or np.datetime64 timestamp for
        the version. Note that datetime.datetime timestamps must be in the UTC
        timezone (np.datetime64 timestamps are not timezone aware and are
        assumed to be UTC). If `timestamp` is `None` (the default) the current
        time when the context manager exits is used. When passing in a manual
        timestamp, be aware that no consistency checks are made to ensure that
        version timestamps are linear or not duplicated.
        """
        if self.closed:
            raise ValueError("File is closed")
        # Remember the current version so a failed stage can be rolled back.
        old_current = self.current_version
        group = create_version_group(self.f, version_name,
                                     prev_version=prev_version)
        try:
            yield group
            group.close()
            commit_version(group, group.datasets(), make_current=make_current,
                           chunks=group.chunks,
                           compression=group.compression,
                           compression_opts=group.compression_opts,
                           timestamp=timestamp)
            self._log_version_diff_stats(old_current, self.current_version)
        except:
            # Roll back: remove the partially staged version and restore the
            # previous current version.
            delete_version(self.f, version_name, old_current)
            raise
        finally:
            self._version_cache.clear()
    def close(self):
        """
        Make sure the VersionedHDF5File object is no longer reachable.
        """
        if not self._closed:
            del self.f
            del self._version_data
            del self._versions
            self._closed = True
    def __repr__(self):
        """
        Prints friendly status information.
        These messages are intended to be similar to h5py messages.
        """
        if self.closed:
            return "<Closed VersionedHDF5File>"
        else:
            return f"<VersionedHDF5File object \"{self.f.filename}\" (mode" \
                   f" {self.f.mode})>"
    def _get_hashes(self, name: str) -> Set[bytes]:
        """Get a set of hashes for the chunks in the dataset.
        Parameters
        ----------
        name : str
            Name of the dataset for which hashes are to be generated
        Returns
        -------
        Set[bytes]
            A set of hashes for the dataset
        """
        with Hashtable(self.f, name) as hashtable:
            return set(hashtable.keys())
    def _log_version_diff_stats(
        self,
        old_version: Optional[str] = None,
        new_version: Optional[str] = None,
    ):
        """Log some stats about differences between two versions.
        Parameters
        ----------
        old_version : Optional[str]
            Old version of the data to compare
        new_version : Optional[str]
            New version of the data to compare
        """
        old_datasets, new_datasets = {}, {}
        # Membership test iterates all version names via __iter__.
        if old_version in self:
            old_datasets = self[old_version].datasets()
        if new_version in self:
            new_datasets = self[new_version].datasets()
        msg = [""]
        for name in sorted(set(old_datasets.keys()) | set(new_datasets.keys())):
            old_dataset = old_datasets.get(name, None)
            new_dataset = new_datasets.get(name, None)
            old_hashes, new_hashes = set(), set()
            old_shape, new_shape = None, None
            old_chunks, new_chunks = None, None
            if old_dataset:
                old_shape = old_dataset.shape
                old_chunks = old_dataset.chunks
                old_hashes = self._get_hashes(name)
            if new_dataset:
                new_shape = new_dataset.shape
                new_chunks = new_dataset.chunks
                new_hashes = self._get_hashes(name)
            # NOTE(review): both hash sets come from the same (current)
            # hashtable for `name`, so these counts compare the staged state
            # against itself per dataset presence — confirm intent.
            chunks_reused = len(old_hashes & new_hashes)
            new_chunks_written = len(new_hashes - old_hashes)
            msg.append(
                f" {name}: Shape: {old_shape} -> {new_shape}; "
                f"Chunks: {old_chunks} -> {new_chunks}; "
                f"New chunks written: {new_chunks_written}; "
                f"Number of chunks reused: {chunks_reused}"
            )
        logger.debug("\n".join(msg))
| {"/versioned_hdf5/replay.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/slicetools.py", "/versioned_hdf5/hashtable.py"], "/analysis/generate_data.py": ["/utils/__init__.py", "/versioned_hdf5/api.py"], "/versioned_hdf5/__init__.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/replay.py"], "/benchmarks/resize.py": ["/versioned_hdf5/__init__.py"], "/versioned_hdf5/tests/test_replay.py": ["/versioned_hdf5/replay.py", "/versioned_hdf5/hashtable.py"], "/versioned_hdf5/api.py": ["/versioned_hdf5/hashtable.py"], "/benchmarks/many_chunks.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/versionedhdf5file.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/delete_versions.py": ["/versioned_hdf5/__init__.py", "/versioned_hdf5/replay.py"], "/versioned_hdf5/tests/test_hashtable.py": ["/versioned_hdf5/hashtable.py", "/versioned_hdf5/tests/helpers.py", "/versioned_hdf5/__init__.py"], "/analysis/performance_tests.py": ["/versioned_hdf5/__init__.py"]} |
62,226 | ArvidJB/versioned-hdf5 | refs/heads/master | /benchmarks/many_chunks.py | # Benchmarks from https://github.com/deshaw/versioned-hdf5/issues/167
import h5py
import numpy as np
from versioned_hdf5 import VersionedHDF5File
dt = np.dtype('double')
def time_many_chunks():
    """Benchmark: stage a full overwrite of a many-chunk 3-D dataset.

    From https://github.com/deshaw/versioned-hdf5/issues/167.
    """
    shape = (2, 15220, 2)
    chunk_shape = (600, 2, 4)
    with h5py.File('foo.h5', 'w') as f:
        vfile = VersionedHDF5File(f)
        with vfile.stage_version('0') as sv:
            sv.create_dataset('bar', shape=shape, maxshape=(None, None, None),
                              chunks=chunk_shape, dtype=dt,
                              data=np.full(shape, 0, dtype=dt))
    version = 1
    with h5py.File('foo.h5', 'r+') as f:
        vfile = VersionedHDF5File(f)
        with vfile.stage_version(str(version)) as sv:
            sv['bar'][:] = np.full(shape, version, dtype=dt)
def time_many_chunks_integer_index():
    """Benchmark: fancy-index assignment touching many chunks.

    From https://github.com/deshaw/versioned-hdf5/issues/167.
    """
    dims = (2, 15220, 2)
    chunk_shape = (600, 2, 4)
    with h5py.File('foo.h5', 'w') as f:
        vfile = VersionedHDF5File(f)
        with vfile.stage_version('0') as sv:
            sv.create_dataset('bar', shape=dims, maxshape=(None, None, None),
                              chunks=chunk_shape, dtype=dt,
                              data=np.full(dims, 0, dtype=dt))
    version = 1
    with h5py.File('foo.h5', 'r+') as f:
        vfile = VersionedHDF5File(f)
        with vfile.stage_version(str(version)) as sv:
            # 30 distinct, sorted indices along the second axis.
            picked = np.sort(np.random.choice(dims[1], 30, replace=False))
            sv['bar'][:, picked, :] = np.full((dims[0], len(picked), dims[2]),
                                              version, dtype=dt)
def time_many_chunks_arange():
    """Benchmark: create a version whose chunks all contain distinct data."""
    dims = (2, 15220, 2)
    chunk_shape = (600, 2, 4)
    with h5py.File('foo.h5', 'w') as f:
        vfile = VersionedHDF5File(f)
        with vfile.stage_version('0') as sv:
            sv.create_dataset('bar', shape=dims, maxshape=(None, None, None),
                              chunks=chunk_shape, dtype=dt,
                              data=np.arange(np.prod(dims), dtype=dt).reshape(dims))
| {"/versioned_hdf5/replay.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/slicetools.py", "/versioned_hdf5/hashtable.py"], "/analysis/generate_data.py": ["/utils/__init__.py", "/versioned_hdf5/api.py"], "/versioned_hdf5/__init__.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/replay.py"], "/benchmarks/resize.py": ["/versioned_hdf5/__init__.py"], "/versioned_hdf5/tests/test_replay.py": ["/versioned_hdf5/replay.py", "/versioned_hdf5/hashtable.py"], "/versioned_hdf5/api.py": ["/versioned_hdf5/hashtable.py"], "/benchmarks/many_chunks.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/versionedhdf5file.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/delete_versions.py": ["/versioned_hdf5/__init__.py", "/versioned_hdf5/replay.py"], "/versioned_hdf5/tests/test_hashtable.py": ["/versioned_hdf5/hashtable.py", "/versioned_hdf5/tests/helpers.py", "/versioned_hdf5/__init__.py"], "/analysis/performance_tests.py": ["/versioned_hdf5/__init__.py"]} |
62,227 | ArvidJB/versioned-hdf5 | refs/heads/master | /utils/__init__.py | from __future__ import (absolute_import, division, print_function, with_statement)
import shutil
import tempfile
from contextlib import contextmanager
@contextmanager
def temp_dir_ctx():
    """Context manager yielding a fresh temporary directory path.

    The directory is removed on exit. Bug fix: the previous version did not
    use try/finally, so the directory leaked whenever the with-body raised.
    """
    tmp_dir = tempfile.mkdtemp()
    try:
        yield tmp_dir
    finally:
        shutil.rmtree(tmp_dir)
| {"/versioned_hdf5/replay.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/slicetools.py", "/versioned_hdf5/hashtable.py"], "/analysis/generate_data.py": ["/utils/__init__.py", "/versioned_hdf5/api.py"], "/versioned_hdf5/__init__.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/replay.py"], "/benchmarks/resize.py": ["/versioned_hdf5/__init__.py"], "/versioned_hdf5/tests/test_replay.py": ["/versioned_hdf5/replay.py", "/versioned_hdf5/hashtable.py"], "/versioned_hdf5/api.py": ["/versioned_hdf5/hashtable.py"], "/benchmarks/many_chunks.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/versionedhdf5file.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/delete_versions.py": ["/versioned_hdf5/__init__.py", "/versioned_hdf5/replay.py"], "/versioned_hdf5/tests/test_hashtable.py": ["/versioned_hdf5/hashtable.py", "/versioned_hdf5/tests/helpers.py", "/versioned_hdf5/__init__.py"], "/analysis/performance_tests.py": ["/versioned_hdf5/__init__.py"]} |
62,228 | ArvidJB/versioned-hdf5 | refs/heads/master | /benchmarks/versionedhdf5file.py | import h5py
import numpy as np
from versioned_hdf5 import VersionedHDF5File
class TimeDatetimeAccess:
    """ASV benchmark: repeatedly look up a version by its timestamp."""
    def setup(self):
        # Build a file with 100 versions of a small random dataset and
        # remember the timestamp recorded on version "50".
        with h5py.File('foo.h5', 'w') as f:
            vfile = VersionedHDF5File(f)
            with vfile.stage_version('0') as sv:
                sv.create_dataset('bar', data=np.random.rand(10))
            for version in range(1, 100):
                with vfile.stage_version(str(version)) as sv:
                    sv['bar'][:] = np.random.rand(10)
            self.dt = np.datetime64(vfile[str(50)].attrs['timestamp'])
    def time_version_by_datetime(self):
        # Based on https://github.com/deshaw/versioned-hdf5/issues/170
        with h5py.File('foo.h5', 'r') as f:
            vfile = VersionedHDF5File(f)
            for _ in range(100):
                _ = vfile[self.dt]['bar'][:]
| {"/versioned_hdf5/replay.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/slicetools.py", "/versioned_hdf5/hashtable.py"], "/analysis/generate_data.py": ["/utils/__init__.py", "/versioned_hdf5/api.py"], "/versioned_hdf5/__init__.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/replay.py"], "/benchmarks/resize.py": ["/versioned_hdf5/__init__.py"], "/versioned_hdf5/tests/test_replay.py": ["/versioned_hdf5/replay.py", "/versioned_hdf5/hashtable.py"], "/versioned_hdf5/api.py": ["/versioned_hdf5/hashtable.py"], "/benchmarks/many_chunks.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/versionedhdf5file.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/delete_versions.py": ["/versioned_hdf5/__init__.py", "/versioned_hdf5/replay.py"], "/versioned_hdf5/tests/test_hashtable.py": ["/versioned_hdf5/hashtable.py", "/versioned_hdf5/tests/helpers.py", "/versioned_hdf5/__init__.py"], "/analysis/performance_tests.py": ["/versioned_hdf5/__init__.py"]} |
62,229 | ArvidJB/versioned-hdf5 | refs/heads/master | /versioned_hdf5/tests/helpers.py | import h5py
from ..backend import initialize
def setup_vfile(file_name='file.hdf5', *, version_name=None):
    """Create a fresh versioned HDF5 file for tests.

    *version_name* may be a single name or an iterable of names; for each,
    an empty group is pre-created under '_version_data/versions'. Returns
    the open h5py File (caller closes it).
    """
    f = h5py.File(file_name, 'w')
    initialize(f)
    if version_name:
        names = [version_name] if isinstance(version_name, str) else version_name
        versions_group = f['_version_data/versions']
        for name in names:
            versions_group.create_group(name)
    return f
| {"/versioned_hdf5/replay.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/slicetools.py", "/versioned_hdf5/hashtable.py"], "/analysis/generate_data.py": ["/utils/__init__.py", "/versioned_hdf5/api.py"], "/versioned_hdf5/__init__.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/replay.py"], "/benchmarks/resize.py": ["/versioned_hdf5/__init__.py"], "/versioned_hdf5/tests/test_replay.py": ["/versioned_hdf5/replay.py", "/versioned_hdf5/hashtable.py"], "/versioned_hdf5/api.py": ["/versioned_hdf5/hashtable.py"], "/benchmarks/many_chunks.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/versionedhdf5file.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/delete_versions.py": ["/versioned_hdf5/__init__.py", "/versioned_hdf5/replay.py"], "/versioned_hdf5/tests/test_hashtable.py": ["/versioned_hdf5/hashtable.py", "/versioned_hdf5/tests/helpers.py", "/versioned_hdf5/__init__.py"], "/analysis/performance_tests.py": ["/versioned_hdf5/__init__.py"]} |
62,230 | ArvidJB/versioned-hdf5 | refs/heads/master | /conftest.py | def pytest_report_header(config):
import h5py
import ndindex
return f"project deps: h5py-{h5py.__version__}, ndindex-{ndindex.__version__}"
| {"/versioned_hdf5/replay.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/slicetools.py", "/versioned_hdf5/hashtable.py"], "/analysis/generate_data.py": ["/utils/__init__.py", "/versioned_hdf5/api.py"], "/versioned_hdf5/__init__.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/replay.py"], "/benchmarks/resize.py": ["/versioned_hdf5/__init__.py"], "/versioned_hdf5/tests/test_replay.py": ["/versioned_hdf5/replay.py", "/versioned_hdf5/hashtable.py"], "/versioned_hdf5/api.py": ["/versioned_hdf5/hashtable.py"], "/benchmarks/many_chunks.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/versionedhdf5file.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/delete_versions.py": ["/versioned_hdf5/__init__.py", "/versioned_hdf5/replay.py"], "/versioned_hdf5/tests/test_hashtable.py": ["/versioned_hdf5/hashtable.py", "/versioned_hdf5/tests/helpers.py", "/versioned_hdf5/__init__.py"], "/analysis/performance_tests.py": ["/versioned_hdf5/__init__.py"]} |
62,231 | ArvidJB/versioned-hdf5 | refs/heads/master | /benchmarks/delete_versions.py | import h5py
from versioned_hdf5 import VersionedHDF5File
import numpy
import tempfile
import shutil
import os
# File used (and reused across repeats) by the TimeDeleting benchmark.
filename = 'delete_versions_bench.h5'
try:
    from versioned_hdf5 import delete_versions
except ImportError:
    # Fallback for older versioned_hdf5 releases without a public
    # delete_versions: rebuild the raw data via the replay primitives.
    from versioned_hdf5.replay import recreate_dataset, tmp_group, swap
    def delete_versions(f, versions_to_delete, names=('values',)):
        """
        Modified replay.delete_version to delete multiple versions.
        """
        if isinstance(f, VersionedHDF5File):
            f = f.f
        def callback(dataset, version_name):
            # Returning None drops the dataset for deleted versions;
            # otherwise keep it unchanged.
            if version_name in versions_to_delete:
                return
            return dataset
        newf = tmp_group(f)
        for name in names:
            recreate_dataset(f, name, newf, callback=callback)
        # Swap the rebuilt raw data into place, then drop the version
        # groups and the temporary group.
        swap(f, newf)
        for version in versions_to_delete:
            del f['_version_data/versions'][version]
        del newf[newf.name]
class TimeDeleting:
    """ASV benchmark: delete ~90% of the versions in a many-version file."""
    # Benchmark parameter: number of sparse-update versions per resize pass.
    params = [10, 30, 50]
    # Setup creates many versions and is slow; allow a generous timeout.
    timeout = 1000
    def setup(self, n):
        # Build the fixture file only once; reused if it already exists.
        if not os.path.exists(filename):
            with h5py.File(filename, 'w') as f:
                vf = VersionedHDF5File(f)
                with vf.stage_version('init') as sv:
                    sv.create_dataset('values', shape=(0, 0), dtype='float', fillvalue=numpy.nan,
                                      chunks=(22, 100), maxshape=(None, None), compression='lzf')
            # generate some test data with around 1000 versions
            v = 1
            with h5py.File(filename, 'r+') as f:
                vf = VersionedHDF5File(f)
                for d in range(22):
                    # One version growing the dataset by a row and 5000 cols.
                    with vf.stage_version(str(v)) as sv:
                        values_ds = sv['values']
                        values_ds.resize((values_ds.shape[0] + 1, values_ds.shape[1] + 5000))
                        values_ds[-1, -5000] = numpy.random.rand()
                        v += 1
                    for c in range(n):
                        # n sparse-update versions touching 50 random columns
                        # of the newest row.
                        with vf.stage_version(str(v)) as sv:
                            values_ds = sv['values']
                            idxs = numpy.random.choice(values_ds.shape[1], 50, replace=False)
                            values_ds[-1, idxs] = numpy.random.rand(50)
                            v += 1
    def teardown(self, n):
        os.remove(filename)
    def time_delete(self, n):
        # Work on a throwaway copy so the fixture survives for repeats.
        # NOTE(review): tempfile.mktemp is race-prone; mkstemp would be safer.
        tmp_name = tempfile.mktemp('.h5')
        shutil.copy2(filename, tmp_name)
        try:
            # want to keep only every 10th version
            versions_to_delete = []
            with h5py.File(tmp_name, 'r') as f:
                vf = VersionedHDF5File(f)
                # Order versions chronologically by their timestamp attr.
                versions = sorted([(v, vf._versions[v].attrs['timestamp']) for v in vf._versions], key=lambda t: t[1])
                for i, v in enumerate(versions):
                    if i % 10 != 0:
                        versions_to_delete.append(v[0])
            with h5py.File(tmp_name, 'r+') as f:
                delete_versions(f, versions_to_delete)
        finally:
            os.remove(tmp_name)
| {"/versioned_hdf5/replay.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/slicetools.py", "/versioned_hdf5/hashtable.py"], "/analysis/generate_data.py": ["/utils/__init__.py", "/versioned_hdf5/api.py"], "/versioned_hdf5/__init__.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/replay.py"], "/benchmarks/resize.py": ["/versioned_hdf5/__init__.py"], "/versioned_hdf5/tests/test_replay.py": ["/versioned_hdf5/replay.py", "/versioned_hdf5/hashtable.py"], "/versioned_hdf5/api.py": ["/versioned_hdf5/hashtable.py"], "/benchmarks/many_chunks.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/versionedhdf5file.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/delete_versions.py": ["/versioned_hdf5/__init__.py", "/versioned_hdf5/replay.py"], "/versioned_hdf5/tests/test_hashtable.py": ["/versioned_hdf5/hashtable.py", "/versioned_hdf5/tests/helpers.py", "/versioned_hdf5/__init__.py"], "/analysis/performance_tests.py": ["/versioned_hdf5/__init__.py"]} |
62,232 | ArvidJB/versioned-hdf5 | refs/heads/master | /versioned_hdf5/tests/test_hashtable.py | from pytest import raises
import numpy as np
import h5py
from ..backend import create_base_dataset
from ..hashtable import Hashtable
from .helpers import setup_vfile
from .. import VersionedHDF5File
def test_hashtable(h5file):
    """Exercise Hashtable mapping semantics and key/value validation."""
    create_base_dataset(h5file, 'test_data', data=np.empty((0,)))
    with Hashtable(h5file, 'test_data') as h:
        assert len(h) == 0
        h[b'\xff'*32] = slice(0, 1)
        assert len(h) == 1
        assert h[b'\xff'*32] == slice(0, 1)
        assert h.largest_index == 1
        # The backing table stores (hash, (start, stop)) pairs.
        assert bytes(h.hash_table[0][0]) == b'\xff'*32
        assert tuple(h.hash_table[0][1]) == (0, 1)
        assert h == {b'\xff'*32: slice(0, 1)}
        # Keys must be bytes (not str) of exactly 32 bytes...
        with raises(TypeError):
            h['\x01'*32] = slice(0, 1)
        with raises(ValueError):
            h[b'\x01'] = slice(0, 1)
        # ...and values must be slices with unit step.
        with raises(TypeError):
            h[b'\x01'*32] = (0, 1)
        with raises(ValueError):
            h[b'\x01'*32] = slice(0, 4, 2)
def test_from_raw_data():
    """Hashtable.from_raw_data rebuilds a table identical to the original
    under a caller-chosen hash table name."""
    with setup_vfile('test.h5') as f:
        vf = VersionedHDF5File(f)
        with vf.stage_version('0') as sv:
            sv.create_dataset('test_data', data=np.arange(100), chunks=(10,))
        h = Hashtable(f, 'test_data')
        h_dataset = h.hash_table_dataset
        h2 = Hashtable.from_raw_data(f, 'test_data',
                                     hash_table_name='test_hash_table')
        h2_dataset = h2.hash_table_dataset
        assert h2_dataset.name == '/_version_data/test_data/test_hash_table'
        # Entry-for-entry identical to the table built during staging.
        np.testing.assert_equal(h_dataset[:], h2_dataset[:])
def test_hashtable_multidimension(h5file):
    """Chunk hashes must incorporate the array shape, not just the bytes."""
    # Ensure that the same data with different shape hashes differently
    create_base_dataset(h5file, 'test_data', data=np.empty((0,)))
    h = Hashtable(h5file, 'test_data')
    assert h.hash(np.ones((1, 2, 3,))) != h.hash(np.ones((3, 2, 1)))
def test_issue_208():
    """Regression test for issue #208: resize plus overlapping slice
    assignments within one staged version must commit without error."""
    with setup_vfile('test.h5') as f:
        vf = VersionedHDF5File(f)
        with vf.stage_version('0') as sv:
            sv.create_dataset('bar', data=np.arange(10))
    with h5py.File('test.h5', 'r+') as f:
        vf = VersionedHDF5File(f)
        with vf.stage_version('1') as sv:
            # Grow, copy a window onto the new tail, then overwrite part of
            # the source window.
            sv['bar'].resize((12,))
            sv['bar'][8:12] = sv['bar'][6:10]
            sv['bar'][6:8] = [0, 0]
| {"/versioned_hdf5/replay.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/slicetools.py", "/versioned_hdf5/hashtable.py"], "/analysis/generate_data.py": ["/utils/__init__.py", "/versioned_hdf5/api.py"], "/versioned_hdf5/__init__.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/replay.py"], "/benchmarks/resize.py": ["/versioned_hdf5/__init__.py"], "/versioned_hdf5/tests/test_replay.py": ["/versioned_hdf5/replay.py", "/versioned_hdf5/hashtable.py"], "/versioned_hdf5/api.py": ["/versioned_hdf5/hashtable.py"], "/benchmarks/many_chunks.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/versionedhdf5file.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/delete_versions.py": ["/versioned_hdf5/__init__.py", "/versioned_hdf5/replay.py"], "/versioned_hdf5/tests/test_hashtable.py": ["/versioned_hdf5/hashtable.py", "/versioned_hdf5/tests/helpers.py", "/versioned_hdf5/__init__.py"], "/analysis/performance_tests.py": ["/versioned_hdf5/__init__.py"]} |
62,233 | ArvidJB/versioned-hdf5 | refs/heads/master | /analysis/performance_tests.py | import os
import json
import h5py
from versioned_hdf5 import VersionedHDF5File
from generate_data import TestVersionedDatasetPerformance as TVDP
# auxiliary code to format file sizes
def format_size(size):
    """Render a byte count as a human-readable string (B, KB, MB or GB).

    Values of a terabyte or more are still reported in GB.
    """
    for suffix in ('B', 'KB', 'MB'):
        if size < 1024:
            return f"{size:.2f} {suffix}"
        size = size / 1024
    return f"{size:.2f} GB"
class PerformanceTests:
    """Base driver for the versioned-hdf5 performance test matrix.

    Subclasses set ``self.testname`` and ``self.testfun`` (one of the data
    generators on ``generate_data.TestVersionedDatasetPerformance``) and
    call :meth:`create_files` to build one HDF5 file per combination of
    (num_transactions, chunk-size exponent, compression filter).
    """
    def __init__(self, **kwargs):
        # Subclasses assign testname/testfun and call _setoptions.
        pass
    def _setoptions(self, options):
        """Populate instance options from the *options* dict, with defaults."""
        self.path = options.get("path", ".")
        self.num_transactions = options.get("num_transactions", [])
        self.exponents = options.get("exponents", [])
        self.compression = options.get("compression", [])
        self.verbose = options.get("verbose", False)
    def create_files(self, versions=True):
        """Create (or reuse) one HDF5 file per parameter combination.

        Returns ``(summary, msg)``: *summary* is a list of per-file stats
        dicts, *msg* a human-readable log of what was created vs. reused.
        All opened h5py files are closed before returning.
        """
        tests = []
        msg = ""
        for c in self.compression:
            for p in self.exponents:
                for n in self.num_transactions:
                    chunk_size = 2 ** p
                    if versions:
                        name = f"{self.testname}_{n}_{p}_{c}"
                    else:
                        name = f"{self.testname}_{n}_{p}_{c}_no_versions"
                    filename = os.path.join(self.path, f"{name}.h5")
                    msg += f"File with {n} transactions, chunk size 2**{p} " \
                           f"and compression filter {c}"
                    try:
                        # Probe for an existing file; if it opens, reuse it
                        # (its creation time is then unknown, reported as 0).
                        h5pyfile = h5py.File(filename, 'r')
                        msg += " exists - unable to compute creation time.\n"
                        t = 0
                    except Exception:
                        msg += " not available. Creating new file.\n"
                        t = self.testfun(n, name, chunk_size, c,
                                         versions=versions, deterministic=True)
                        h5pyfile = h5py.File(filename, 'r')
                    if versions:
                        data = VersionedHDF5File(h5pyfile)
                        tests.append(dict(num_transactions=n,
                                          chunk_size=chunk_size,
                                          compression=c,
                                          filename=filename,
                                          h5pyfile=h5pyfile,
                                          data=data,
                                          t_write=t))
                    else:
                        tests.append(dict(num_transactions=n,
                                          chunk_size=chunk_size,
                                          compression=c,
                                          filename=filename,
                                          h5pyfile=h5pyfile,
                                          t_write=t))
        for test in tests:
            test['size'] = os.path.getsize(test['filename'])
            test['size_label'] = format_size(test['size'])
        if versions:
            # Only the files from the last (compression, exponent) pass are
            # inspected for per-version sizes.
            nt = len(self.num_transactions)
            for test in tests[-nt:]:
                lengths = []
                total_size = 0
                for vname in test['data']._versions:
                    if vname != '__first_version__':
                        version = test['data'][vname]
                        group_key = list(version.keys())[0]
                        lengths.append(len(version[group_key]['val']))
                        total_size += len(version[group_key]['val'])
                # 24 * total row count — presumably a bytes-per-row storage
                # estimate; TODO confirm the constant's derivation.
                test['theoretical_sizes'] = 24 * total_size
        # Removing some irrelevant info from the dictionary
        summary = []
        for test in tests:
            summary.append(dict((k, test[k]) for k in ['num_transactions',
                                                       'filename', 'size',
                                                       'size_label', 't_write',
                                                       'chunk_size',
                                                       'compression']))
            test['h5pyfile'].close()
        self.tests = tests
        return summary, msg
    def save(self, summary, filename):
        """Serialize *summary* as JSON to ``<filename>.json``.

        Bug fix: the previous version wrote to a hard-coded
        ``(unknown).json`` literal and ignored the *filename* argument.
        """
        with open(f"{filename}.json", "w") as json_out:
            json.dump(summary, json_out)
class test_large_fraction_changes_sparse(PerformanceTests):
    """Scenario: a large fraction of sparse values change per transaction.

    The redundant ``create_files``/``save`` overrides (which only delegated
    unchanged to ``super()``) were removed; inherited behavior is identical.
    """
    def __init__(self, **kwargs):
        self.testname = "test_large_fraction_changes_sparse"
        self.testfun = TVDP().test_large_fraction_changes_sparse
        super()._setoptions(options=kwargs)
class test_small_fraction_changes_sparse(PerformanceTests):
    """Scenario: a small fraction of sparse values change per transaction.

    Redundant pass-through ``create_files``/``save`` overrides removed;
    inherited behavior is identical.
    """
    def __init__(self, **kwargs):
        self.testname = "test_small_fraction_changes_sparse"
        self.testfun = TVDP().test_small_fraction_changes_sparse
        super()._setoptions(options=kwargs)
class test_mostly_appends_sparse(PerformanceTests):
    """Scenario: transactions mostly append sparse data.

    Redundant pass-through ``create_files``/``save`` overrides removed;
    inherited behavior is identical.
    """
    def __init__(self, **kwargs):
        self.testname = "test_mostly_appends_sparse"
        self.testfun = TVDP().test_mostly_appends_sparse
        super()._setoptions(options=kwargs)
class test_mostly_appends_dense(PerformanceTests):
    """Scenario: transactions mostly append dense data.

    Redundant pass-through ``create_files``/``save`` overrides removed;
    inherited behavior is identical.
    """
    def __init__(self, **kwargs):
        self.testname = "test_mostly_appends_dense"
        self.testfun = TVDP().test_mostly_appends_dense
        super()._setoptions(options=kwargs)
class test_large_fraction_constant_sparse(PerformanceTests):
    """Scenario: a large fraction of sparse values stay constant.

    Redundant pass-through ``create_files``/``save`` overrides removed;
    inherited behavior is identical.
    """
    def __init__(self, **kwargs):
        self.testname = "test_large_fraction_constant_sparse"
        self.testfun = TVDP().test_large_fraction_constant_sparse
        super()._setoptions(options=kwargs)
if __name__ == "__main__":
    # Run each scenario once with a small matrix (2 transactions, chunk
    # size 2**12, no compression), both with and without versioning, and
    # dump each summary to a JSON file.
    tests = [test_mostly_appends_dense,
             test_small_fraction_changes_sparse,
             test_large_fraction_changes_sparse,
             test_large_fraction_constant_sparse,
             test_mostly_appends_sparse]
    for test in tests:
        testcase = test(num_transactions=[2],
                        exponents=[12, ],
                        compression=[None, ])
        summary, msg = testcase.create_files(versions=True)
        testcase.save(summary, f"{testcase.testname}")
        summary, msg = testcase.create_files(versions=False)
        testcase.save(summary, f"{testcase.testname}_no_versions")
| {"/versioned_hdf5/replay.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/slicetools.py", "/versioned_hdf5/hashtable.py"], "/analysis/generate_data.py": ["/utils/__init__.py", "/versioned_hdf5/api.py"], "/versioned_hdf5/__init__.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/replay.py"], "/benchmarks/resize.py": ["/versioned_hdf5/__init__.py"], "/versioned_hdf5/tests/test_replay.py": ["/versioned_hdf5/replay.py", "/versioned_hdf5/hashtable.py"], "/versioned_hdf5/api.py": ["/versioned_hdf5/hashtable.py"], "/benchmarks/many_chunks.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/versionedhdf5file.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/delete_versions.py": ["/versioned_hdf5/__init__.py", "/versioned_hdf5/replay.py"], "/versioned_hdf5/tests/test_hashtable.py": ["/versioned_hdf5/hashtable.py", "/versioned_hdf5/tests/helpers.py", "/versioned_hdf5/__init__.py"], "/analysis/performance_tests.py": ["/versioned_hdf5/__init__.py"]} |
62,234 | ArvidJB/versioned-hdf5 | refs/heads/master | /versioned_hdf5/slicetools.py | from functools import lru_cache
from ndindex import Slice, Tuple
def spaceid_to_slice(space):
    """
    Convert an h5py spaceid object into an ndindex index
    The resulting index is always a Tuple index.
    """
    # Imported lazily so importing this module does not require h5py.
    from h5py import h5s
    sel_type = space.get_select_type()
    if sel_type == h5s.SEL_ALL:
        # Whole-space selection: an empty Tuple selects everything.
        return Tuple()
    elif sel_type == h5s.SEL_HYPERSLABS:
        slices = []
        starts, strides, counts, blocks = space.get_regular_hyperslab()
        # One slice per dimension of the regular hyperslab.
        for start, stride, count, block in zip(starts, strides, counts, blocks):
            slices.append(hyperslab_to_slice(start, stride, count, block))
        return Tuple(*slices)
    elif sel_type == h5s.SEL_NONE:
        # Empty selection: a zero-length slice in the first dimension.
        return Tuple(Slice(0, 0),)
    else:
        raise NotImplementedError("Point selections are not yet supported")
@lru_cache(2048)
def hyperslab_to_slice(start, stride, count, block):
    """Convert one dimension of an HDF5 regular hyperslab to an ndindex Slice.

    Only hyperslabs with ``block == 1`` or ``count == 1`` are supported;
    anything else raises NotImplementedError.
    """
    if block != 1 and count != 1:
        raise NotImplementedError("Nontrivial blocks are not yet supported")
    # The last selected run starts at start + stride*(count-1) and spans
    # `block` elements; the exclusive stop follows from that.
    stop = start + (stride*(count - 1) + 1)*block
    step = stride if block == 1 else 1
    return Slice(start, stop, step)
| {"/versioned_hdf5/replay.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/slicetools.py", "/versioned_hdf5/hashtable.py"], "/analysis/generate_data.py": ["/utils/__init__.py", "/versioned_hdf5/api.py"], "/versioned_hdf5/__init__.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/replay.py"], "/benchmarks/resize.py": ["/versioned_hdf5/__init__.py"], "/versioned_hdf5/tests/test_replay.py": ["/versioned_hdf5/replay.py", "/versioned_hdf5/hashtable.py"], "/versioned_hdf5/api.py": ["/versioned_hdf5/hashtable.py"], "/benchmarks/many_chunks.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/versionedhdf5file.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/delete_versions.py": ["/versioned_hdf5/__init__.py", "/versioned_hdf5/replay.py"], "/versioned_hdf5/tests/test_hashtable.py": ["/versioned_hdf5/hashtable.py", "/versioned_hdf5/tests/helpers.py", "/versioned_hdf5/__init__.py"], "/analysis/performance_tests.py": ["/versioned_hdf5/__init__.py"]} |
62,235 | ArvidJB/versioned-hdf5 | refs/heads/master | /versioned_hdf5/tests/test_versions.py | from pytest import raises
import numpy as np
from numpy.testing import assert_equal
from ndindex import Tuple, Slice
from ..backend import DEFAULT_CHUNK_SIZE
from ..versions import (create_version_group, commit_version,
get_nth_previous_version, set_current_version,
all_versions, delete_version)
def test_create_version(h5file):
    """Commit two versions of one dataset and check raw-data layout,
    deduplication, compression settings, and rejection of mismatched
    chunk/compression options."""
    chunk_size = 2**10
    chunks = (chunk_size,)
    # Four logical chunks: two of 1.0, one of 2.0, one of 3.0.
    data = np.concatenate((np.ones((2*chunk_size,)),
                           2*np.ones(chunks),
                           3*np.ones(chunks)))
    version1 = create_version_group(h5file, 'version1', '')
    commit_version(version1, {'test_data': data},
                   chunks={'test_data': chunks},
                   compression={'test_data': 'gzip'},
                   compression_opts={'test_data': 3})
    # A chunk size differing from the existing raw data must be rejected;
    # the partially created version is then deleted.
    version_bad = create_version_group(h5file, 'version_bad', '')
    raises(ValueError, lambda: commit_version(version_bad, {'test_data': data},
                                              chunks={'test_data': (2**9,)}))
    delete_version(h5file, 'version_bad', 'version1')
    # Mismatched compression is likewise rejected.
    version_bad = create_version_group(h5file, 'version_bad', '')
    raises(ValueError, lambda: commit_version(version_bad, {'test_data': data},
                                              compression={'test_data': 'lzf'}))
    delete_version(h5file, 'version_bad', 'version1')
    # ... as are mismatched compression options.
    version_bad = create_version_group(h5file, 'version_bad', '')
    raises(ValueError, lambda: commit_version(version_bad,
                                              {'test_data': data},
                                              compression_opts={'test_data': 4}))
    delete_version(h5file, 'version_bad', 'version1')
    assert version1.attrs['prev_version'] == '__first_version__'
    assert version1.parent.attrs['current_version'] == 'version1'
    # Test against the file here, not version1, since version1 is the
    # InMemoryGroup returned from create_version_group, but we did not add
    # the datasets to it directly.
    assert_equal(h5file['_version_data/versions/version1/test_data'], data)
    ds = h5file['/_version_data/test_data/raw_data']
    # Four logical chunks deduplicate to three unique ones in the raw data.
    assert ds.shape == (3*chunk_size,)
    assert_equal(ds[0:1*chunk_size], 1.0)
    assert_equal(ds[1*chunk_size:2*chunk_size], 2.0)
    assert_equal(ds[2*chunk_size:3*chunk_size], 3.0)
    assert ds.compression == 'gzip'
    assert ds.compression_opts == 3
    # Modify one element so exactly one chunk differs from version1.
    data[0] = 0.0
    version2 = create_version_group(h5file, 'version2', 'version1')
    # Committing again into the already-committed version1 group must fail.
    raises(ValueError,
           lambda: commit_version(version1, {'test_data': data},
                                  make_current=False))
    commit_version(version2, {'test_data': data},
                   make_current=False)
    assert version2.attrs['prev_version'] == 'version1'
    assert_equal(h5file['_version_data/versions/version2/test_data'], data)
    # make_current=False leaves version1 as the current version.
    assert version2.parent.attrs['current_version'] == 'version1'
    # Only the single modified chunk is appended to the raw data.
    assert ds.shape == (4*chunk_size,)
    assert_equal(ds[0:1*chunk_size], 1.0)
    assert_equal(ds[1*chunk_size:2*chunk_size], 2.0)
    assert_equal(ds[2*chunk_size:3*chunk_size], 3.0)
    assert_equal(ds[3*chunk_size], 0.0)
    assert_equal(ds[3*chunk_size+1:4*chunk_size], 1.0)
    assert ds.compression == 'gzip'
    assert ds.compression_opts == 3
    # all_versions() hides the sentinel first version by default.
    assert set(all_versions(h5file)) == {'version1', 'version2'}
    assert set(all_versions(h5file, include_first=True)) == {'version1',
                                                             'version2',
                                                             '__first_version__'}
def test_create_version_chunks(h5file):
    """Commit versions where data is supplied as a mapping of chunk index
    -> array (or -> Slice into existing raw data) instead of one array."""
    chunk_size = 2**10
    chunks = (chunk_size,)
    data = np.concatenate((np.ones((2*chunk_size,)),
                           2*np.ones(chunks),
                           3*np.ones(chunks)))
    # TODO: Support creating the initial version with chunks
    version1 = create_version_group(h5file, 'version1')
    commit_version(version1, {'test_data': data},
                   chunks={'test_data': chunks},
                   compression={'test_data': 'gzip'},
                   compression_opts={'test_data': 3})
    # Mismatched chunk size / compression / compression_opts are rejected,
    # exactly as in test_create_version above.
    version_bad = create_version_group(h5file, 'version_bad', '')
    raises(ValueError, lambda: commit_version(version_bad,
                                              {'test_data': data},
                                              chunks={'test_data': (2**9,)}))
    delete_version(h5file, 'version_bad', 'version1')
    version_bad = create_version_group(h5file, 'version_bad', '')
    raises(ValueError, lambda: commit_version(version_bad,
                                              {'test_data': data},
                                              compression={'test_data':'lzf'}))
    delete_version(h5file, 'version_bad', 'version1')
    version_bad = create_version_group(h5file, 'version_bad', '')
    raises(ValueError, lambda: commit_version(version_bad,
                                              {'test_data': data},
                                              compression_opts={'test_data':4}))
    delete_version(h5file, 'version_bad', 'version1')
    assert_equal(h5file['_version_data/versions/version1/test_data'], data)
    ds = h5file['/_version_data/test_data/raw_data']
    # Four logical chunks deduplicate to three unique ones.
    assert ds.shape == (3*chunk_size,)
    assert_equal(ds[0:1*chunk_size], 1.0)
    assert_equal(ds[1*chunk_size:2*chunk_size], 2.0)
    assert_equal(ds[2*chunk_size:3*chunk_size], 3.0)
    assert ds.compression == 'gzip'
    assert ds.compression_opts == 3
    # Second version given as {chunk index: array}.
    data2_chunks = {
        Tuple(Slice(0*chunk_size, 1*chunk_size, 1)): np.ones(chunks),
        Tuple(Slice(1*chunk_size, 2*chunk_size, 1)): np.ones(chunks),
        Tuple(Slice(2*chunk_size, 3*chunk_size, 1)): 2*np.ones(chunks),
        Tuple(Slice(3*chunk_size, 4*chunk_size, 1)): 3*np.ones(chunks),
    }
    # Change one element so only the first chunk is new.
    data2_chunks[Tuple(Slice(0*chunk_size, 1*chunk_size, 1))][0] = 0.0
    data[0] = 0.0
    version2 = create_version_group(h5file, 'version2')
    commit_version(version2, {'test_data': data2_chunks})
    assert_equal(h5file['_version_data/versions/version2/test_data'], data)
    # Only the modified chunk is appended to the raw data.
    assert ds.shape == (4*chunk_size,)
    assert_equal(ds[0:1*chunk_size], 1.0)
    assert_equal(ds[1*chunk_size:2*chunk_size], 2.0)
    assert_equal(ds[2*chunk_size:3*chunk_size], 3.0)
    assert_equal(ds[3*chunk_size], 0.0)
    assert_equal(ds[3*chunk_size+1:4*chunk_size], 1.0)
    assert ds.compression == 'gzip'
    assert ds.compression_opts == 3
    # Third version: chunks may also be given as Slices referencing data
    # already present in the raw dataset, instead of arrays.
    data3_chunks = {
        Tuple(Slice(0*chunk_size, 1*chunk_size, 1)): np.ones(chunks),
        Tuple(Slice(1*chunk_size, 2*chunk_size, 1)): Slice(0*chunk_size, 1*chunk_size),
        Tuple(Slice(2*chunk_size, 3*chunk_size, 1)): Slice(1*chunk_size, 2*chunk_size),
        Tuple(Slice(3*chunk_size, 4*chunk_size, 1)): Slice(2*chunk_size, 3*chunk_size),
    }
    data3_chunks[Tuple(Slice(0*chunk_size, 1*chunk_size, 1))][0] = 2.0
    data[0] = 2.0
    version3 = create_version_group(h5file, 'version3')
    commit_version(version3, {'test_data': data3_chunks})
    assert_equal(h5file['_version_data/versions/version3/test_data'], data)
    # Again, only the one genuinely new chunk is appended.
    assert ds.shape == (5*chunk_size,)
    assert_equal(ds[0:1*chunk_size], 1.0)
    assert_equal(ds[1*chunk_size:2*chunk_size], 2.0)
    assert_equal(ds[2*chunk_size:3*chunk_size], 3.0)
    assert_equal(ds[3*chunk_size], 0.0)
    assert_equal(ds[3*chunk_size+1:4*chunk_size], 1.0)
    assert_equal(ds[4*chunk_size], 2.0)
    assert_equal(ds[4*chunk_size+1:5*chunk_size], 1.0)
    assert set(all_versions(h5file)) == {'version1', 'version2', 'version3'}
def test_get_nth_prev_version(h5file):
    """Walk the prev_version chain, including a branched history
    (version2_1 branches off version1)."""
    data = np.concatenate((np.ones((2*DEFAULT_CHUNK_SIZE,)),
                           2*np.ones((DEFAULT_CHUNK_SIZE,)),
                           3*np.ones((DEFAULT_CHUNK_SIZE,))))
    version1 = create_version_group(h5file, 'version1')
    commit_version(version1, {'test_data': data})
    data[0] = 2.0
    version2 = create_version_group(h5file, 'version2')
    commit_version(version2, {'test_data': data})
    data[0] = 3.0
    version3 = create_version_group(h5file, 'version3')
    commit_version(version3, {'test_data': data})
    # Branch a second child off version1.
    data[1] = 2.0
    version2_1 = create_version_group(h5file, 'version2_1', 'version1')
    commit_version(version2_1, {'test_data': data})
    # n=0 is the version itself; walking past the first version raises
    # IndexError.
    assert get_nth_previous_version(h5file, 'version1', 0) == 'version1'
    with raises(IndexError):
        get_nth_previous_version(h5file, 'version1', 1)
    assert get_nth_previous_version(h5file, 'version2', 0) == 'version2'
    assert get_nth_previous_version(h5file, 'version2', 1) == 'version1'
    with raises(IndexError):
        get_nth_previous_version(h5file, 'version2', 2)
    assert get_nth_previous_version(h5file, 'version3', 0) == 'version3'
    assert get_nth_previous_version(h5file, 'version3', 1) == 'version2'
    assert get_nth_previous_version(h5file, 'version3', 2) == 'version1'
    with raises(IndexError):
        get_nth_previous_version(h5file, 'version3', 3)
    # The branch's parent is version1, not version2.
    assert get_nth_previous_version(h5file, 'version2_1', 0) == 'version2_1'
    assert get_nth_previous_version(h5file, 'version2_1', 1) == 'version1'
    with raises(IndexError):
        get_nth_previous_version(h5file, 'version2_1', 2)
def test_set_current_version(h5file):
    """current_version tracks the latest commit and can be reset to any
    existing version; unknown version names are rejected."""
    data = np.concatenate((np.ones((2*DEFAULT_CHUNK_SIZE,)),
                           2*np.ones((DEFAULT_CHUNK_SIZE,)),
                           3*np.ones((DEFAULT_CHUNK_SIZE,))))
    version1 = create_version_group(h5file, 'version1')
    commit_version(version1, {'test_data': data})
    versions = h5file['_version_data/versions']
    assert versions.attrs['current_version'] == 'version1'
    data[0] = 2.0
    version2 = create_version_group(h5file, 'version2')
    commit_version(version2, {'test_data': data})
    # Each commit advances current_version by default.
    assert versions.attrs['current_version'] == 'version2'
    set_current_version(h5file, 'version1')
    assert versions.attrs['current_version'] == 'version1'
    # A version that was never committed cannot be made current.
    with raises(ValueError):
        set_current_version(h5file, 'version3')
def test_delete_version(h5file):
    """delete_version() validates both the deleted and replacement version
    names, and resets current_version to the sentinel first version."""
    # Deleting a version that does not exist yet is an error.
    raises(ValueError, lambda: delete_version(h5file, 'version1'))
    data = np.concatenate((np.ones((2*DEFAULT_CHUNK_SIZE,)),
                           2*np.ones((DEFAULT_CHUNK_SIZE,)),
                           3*np.ones((DEFAULT_CHUNK_SIZE,))))
    version1 = create_version_group(h5file, 'version1')
    commit_version(version1, {'test_data': data})
    versions = h5file['_version_data/versions']
    assert versions.attrs['current_version'] == 'version1'
    # The replacement version must exist too.
    raises(ValueError, lambda: delete_version(h5file, 'version1', 'doesntexist'))
    delete_version(h5file, 'version1')
    versions = h5file['_version_data/versions']
    # Only the sentinel first version remains and becomes current.
    assert versions.attrs['current_version'] == '__first_version__'
    assert list(versions) == ['__first_version__']
| {"/versioned_hdf5/replay.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/slicetools.py", "/versioned_hdf5/hashtable.py"], "/analysis/generate_data.py": ["/utils/__init__.py", "/versioned_hdf5/api.py"], "/versioned_hdf5/__init__.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/replay.py"], "/benchmarks/resize.py": ["/versioned_hdf5/__init__.py"], "/versioned_hdf5/tests/test_replay.py": ["/versioned_hdf5/replay.py", "/versioned_hdf5/hashtable.py"], "/versioned_hdf5/api.py": ["/versioned_hdf5/hashtable.py"], "/benchmarks/many_chunks.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/versionedhdf5file.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/delete_versions.py": ["/versioned_hdf5/__init__.py", "/versioned_hdf5/replay.py"], "/versioned_hdf5/tests/test_hashtable.py": ["/versioned_hdf5/hashtable.py", "/versioned_hdf5/tests/helpers.py", "/versioned_hdf5/__init__.py"], "/analysis/performance_tests.py": ["/versioned_hdf5/__init__.py"]} |
62,236 | ArvidJB/versioned-hdf5 | refs/heads/master | /benchmarks_install.py | """
This is the script used by asv run to install dependencies. It should not be
called directly.
This is needed because we have to install specific versions of ndindex
depending on what commit we are on, because some backwards incompatible
changes in ndindex were made in tandem with corresponding commits in
versioned-hdf5.
"""
import builtins
import sys
import os
import subprocess
# The first commit in versioned-hdf5 that is not compatible with ndindex 1.5
# (used by install_dependencies() below to decide which ndindex pin to install).
ndindex_16_commit = 'af9ba2313c73cf00c10f490407956ed3c0e6467e'
def print(*args):
    """Module-level shadow of the builtin print that flushes stdout.

    If stdout is not flushed, print output appears out of order relative to
    run()'s subprocess output in the ``asv run -v`` log.
    """
    builtins.print(*args, flush=True)
def run(command, *args, **kwargs):
    """Echo *command* and execute it via subprocess.run.

    ``check=True`` is the default unless the caller overrides it.
    """
    print(' '.join(command))
    if 'check' not in kwargs:
        kwargs['check'] = True
    return subprocess.run(command, *args, **kwargs)
def main():
    """Entry point: ``python benchmarks_install.py <commit> <env_dir> <build_dir>``."""
    _script, commit, env_dir, build_dir = sys.argv
    copy_env_dir(env_dir, commit)
    install_dependencies(commit, env_dir)
    install_versioned_hdf5(build_dir)
def copy_env_dir(env_dir, commit):
    """Reset *env_dir* from a pristine snapshot and check out *commit*.

    asv reuses the env dir between runs. Rather than maintaining an
    uninstall script, we snapshot the raw env dir once into a template
    directory, then restore the env dir from that template before every
    install.
    """
    snapshot = env_dir + '-template'
    if not os.path.exists(snapshot):
        # First ever run: take the snapshot.
        print("Creating template env directory", snapshot)
        run(['cp', '-R', env_dir, snapshot])
    run(['rm', '-rf', env_dir])
    run(['cp', '-R', snapshot, env_dir])
    # The reset wiped asv's project checkout inside the env dir, so restore
    # it to the requested commit.
    os.chdir(os.path.join(env_dir, 'project'))
    run(['git', 'checkout', commit])
    os.chdir(env_dir)
def install_dependencies(commit, env_dir):
    """Install the ndindex version matching *commit*'s era.

    Commits at or after ndindex_16_commit require ndindex >= 1.5.1; older
    commits must keep ndindex 1.5.
    """
    # Check if HEAD is after the ndindex_16_commit.
    # See https://stackoverflow.com/questions/3005392/how-can-i-tell-if-one-commit-is-a-descendant-of-another-commit
    p = run(['git', 'merge-base', '--is-ancestor', ndindex_16_commit, commit],
            check=False)
    if p.returncode == 0:
        # ndindex_16_commit is an ancestor of commit.
        print("Installing ndindex >=1.5.1")
        install(env_dir, ndindex_version='>=1.5.1')
    elif p.returncode == 1:
        print("Installing ndindex 1.5")
        install(env_dir, ndindex_version='==1.5')
    else:
        # Any other code means git itself failed.
        raise RuntimeError(f"Error checking commit history for benchmarks install (git gave return code {p.returncode})")
def install_versioned_hdf5(build_dir):
    """pip-install the versioned-hdf5 checkout located at *build_dir*."""
    print("Installing versioned HDF5")
    cmd = ['python', '-m', 'pip', 'install', build_dir]
    run(cmd)
def install(env_dir, ndindex_version='>=1.5', h5py_version='<3'):
    """Conda-install pinned h5py and ndindex into the env at *env_dir*."""
    deps = ['h5py' + h5py_version, 'ndindex' + ndindex_version]
    run(['conda', 'install', '-c', 'conda-forge', '-p', env_dir, *deps], check=True)
# Script entry point (invoked by asv run; not meant to be imported).
if __name__ == '__main__':
    main()
| {"/versioned_hdf5/replay.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/slicetools.py", "/versioned_hdf5/hashtable.py"], "/analysis/generate_data.py": ["/utils/__init__.py", "/versioned_hdf5/api.py"], "/versioned_hdf5/__init__.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/replay.py"], "/benchmarks/resize.py": ["/versioned_hdf5/__init__.py"], "/versioned_hdf5/tests/test_replay.py": ["/versioned_hdf5/replay.py", "/versioned_hdf5/hashtable.py"], "/versioned_hdf5/api.py": ["/versioned_hdf5/hashtable.py"], "/benchmarks/many_chunks.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/versionedhdf5file.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/delete_versions.py": ["/versioned_hdf5/__init__.py", "/versioned_hdf5/replay.py"], "/versioned_hdf5/tests/test_hashtable.py": ["/versioned_hdf5/hashtable.py", "/versioned_hdf5/tests/helpers.py", "/versioned_hdf5/__init__.py"], "/analysis/performance_tests.py": ["/versioned_hdf5/__init__.py"]} |
62,237 | ArvidJB/versioned-hdf5 | refs/heads/master | /benchmarks/hdf5.py | # Pure hdf5 version of TimeInMemoryDataset and TimeInMemoryArraDataset
import os
import h5py
import numpy as np
class TimePureHDF5:
    """Pure-h5py counterpart of the InMemoryDataset/InMemoryArrayDataset
    benchmarks, used as a baseline for comparison."""

    def setup(self):
        """Create a fresh resizable, chunked dataset before each benchmark."""
        self.file = h5py.File('bench.hdf5', 'w')
        self.file.create_dataset('data',
                                 data=np.arange(10000).reshape((100, 10, 10)),
                                 chunks=(3, 3, 3), maxshape=(None, None, None))

    def teardown(self):
        """Close and delete the scratch file after each benchmark."""
        self.file.close()
        os.remove('bench.hdf5')

    def time_getattr(self):
        ds = self.file['data']
        ds[:, 0, 0:6]

    def time_setattr(self):
        ds = self.file['data']
        ds[:, 0, 0:6] = -1

    def time_resize_bigger(self):
        ds = self.file['data']
        ds.resize((100, 100, 100))

    def time_resize_smaller(self):
        ds = self.file['data']
        ds.resize((10, 10, 10))
| {"/versioned_hdf5/replay.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/slicetools.py", "/versioned_hdf5/hashtable.py"], "/analysis/generate_data.py": ["/utils/__init__.py", "/versioned_hdf5/api.py"], "/versioned_hdf5/__init__.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/replay.py"], "/benchmarks/resize.py": ["/versioned_hdf5/__init__.py"], "/versioned_hdf5/tests/test_replay.py": ["/versioned_hdf5/replay.py", "/versioned_hdf5/hashtable.py"], "/versioned_hdf5/api.py": ["/versioned_hdf5/hashtable.py"], "/benchmarks/many_chunks.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/versionedhdf5file.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/delete_versions.py": ["/versioned_hdf5/__init__.py", "/versioned_hdf5/replay.py"], "/versioned_hdf5/tests/test_hashtable.py": ["/versioned_hdf5/hashtable.py", "/versioned_hdf5/tests/helpers.py", "/versioned_hdf5/__init__.py"], "/analysis/performance_tests.py": ["/versioned_hdf5/__init__.py"]} |
62,238 | ArvidJB/versioned-hdf5 | refs/heads/master | /versioned_hdf5/hashtable.py | import numpy as np
from ndindex import Slice, Tuple, ChunkSize
import hashlib
from collections.abc import MutableMapping
from functools import lru_cache
class Hashtable(MutableMapping):
    """
    A proxy class representing the hash table for an array

    The hash table for an array is a mapping from {sha256_hash: slice}, where
    slice is a slice for the data in the array.

    General usage should look like

        with Hashtable(f, name) as h:
            data_hash = h.hash(data[raw_slice])
            raw_slice = h.setdefault(data_hash, raw_slice)

    where setdefault will insert the hash into the table if it
    doesn't exist, and return the existing entry otherwise.

    hashtable.largest_index is the next index in the array that slices
    should be mapped to.

    Note that for performance reasons, the hashtable does not write to the
    dataset until you call write() or exit it as a context manager.
    """
    # Cache instances of the class for performance purposes. This works off
    # the assumption that nothing else modifies the version data.

    # This is done here because putting @lru_cache() on the class breaks the
    # classmethods. Warning: This does not normalize kwargs, so it is possible
    # to have multiple hashtable instances for the same hashtable.
    @lru_cache()
    def __new__(cls, *args, **kwargs):
        obj = super().__new__(cls)
        return obj

    def __init__(self, f, name, *, chunk_size=None, hash_table_name='hash_table'):
        from .backend import DEFAULT_CHUNK_SIZE
        self.f = f
        self.name = name
        self.chunk_size = chunk_size or DEFAULT_CHUNK_SIZE
        self.hash_table_name = hash_table_name

        if hash_table_name in f['_version_data'][name]:
            self._load_hashtable()
        else:
            self._create_hashtable()
        self._largest_index = None
        # In-memory copy of the table (fast to mutate) plus a handle on the
        # backing dataset (persisted by write()).
        self.hash_table = f['_version_data'][name][hash_table_name][:]
        self.hash_table_dataset = f['_version_data'][name][hash_table_name]

    @classmethod
    def from_raw_data(cls, f, name, chunk_size=None, hash_table_name='hash_table'):
        """Build a fresh hash table by hashing every chunk of the existing
        raw data for *name*. Raises ValueError if the table already exists."""
        if hash_table_name in f['_version_data'][name]:
            raise ValueError(f"a hash table {hash_table_name!r} for {name!r} already exists")

        hashtable = cls(f, name, chunk_size=chunk_size, hash_table_name=hash_table_name)

        raw_data = f['_version_data'][name]['raw_data']
        chunks = ChunkSize(raw_data.chunks)
        for c in chunks.indices(raw_data.shape):
            data_hash = hashtable.hash(raw_data[c.raw])
            hashtable.setdefault(data_hash, c.args[0])

        hashtable.write()
        return hashtable

    hash_function = hashlib.sha256
    hash_size = hash_function().digest_size

    def hash(self, data):
        """SHA256 over the raw bytes plus the shape, so arrays with the same
        bytes but different shapes hash differently."""
        return self.hash_function(data.data.tobytes() + bytes(str(data.shape), 'ascii')).digest()

    @property
    def largest_index(self):
        # Lazily read from the dataset attributes on first access.
        if self._largest_index is None:
            self._largest_index = self.hash_table_dataset.attrs['largest_index']
        return self._largest_index

    @largest_index.setter
    def largest_index(self, value):
        self._largest_index = value

    def write(self):
        """Flush the in-memory table back to the backing dataset."""
        largest_index = self.largest_index
        if largest_index >= self.hash_table_dataset.shape[0]:
            self.hash_table_dataset.resize((largest_index,))

        # largest_index is here for backwards compatibility for when the hash
        # table shape used to always be chunk_size aligned.
        self.hash_table_dataset.attrs['largest_index'] = self.largest_index
        self.hash_table_dataset[:largest_index] = self.hash_table[:largest_index]

    def inverse(self):
        r"""
        Return a dictionary mapping Slice: array_of_hash.

        The Slices are all `reduce()`\d.
        """
        # NOTE(review): this iterates the whole in-memory table, which may
        # include zeroed rows past largest_index — confirm callers expect that.
        return {Slice(*s).reduce(): h for h, s in self.hash_table}

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Only persist on a clean exit; leave the dataset untouched on error.
        if exc_type is not None:
            return
        self.write()

    def _create_hashtable(self):
        """Create an empty backing dataset for this hash table."""
        f = self.f
        name = self.name

        # TODO: Use get_chunks() here (the real chunk size should be based on
        # bytes, not number of elements)
        # Each row is (32-byte hash, (start, stop) of the raw-data slice).
        dtype = np.dtype([('hash', 'B', (self.hash_size,)), ('shape', 'i8', (2,))])
        hash_table = f['_version_data'][name].create_dataset(self.hash_table_name,
                                                             shape=(1,), dtype=dtype,
                                                             chunks=(self.chunk_size,),
                                                             maxshape=(None,),
                                                             compression='lzf')
        hash_table.attrs['largest_index'] = 0
        self._indices = {}

    def _load_hashtable(self):
        """Rebuild the {hash_bytes: row_index} lookup from the dataset."""
        hash_table = self.f['_version_data'][self.name][self.hash_table_name]
        largest_index = hash_table.attrs['largest_index']
        hash_table_arr = hash_table[:largest_index]
        hashes = bytes(hash_table_arr['hash'])
        hashes = [hashes[i*self.hash_size:(i+1)*self.hash_size] for i in range(largest_index)]
        self._indices = {k: i for i, k in enumerate(hashes)}

    def __getitem__(self, key):
        """Return the raw-data Slice stored for *key* (hash bytes or array)."""
        if isinstance(key, np.ndarray):
            key = key.tobytes()
        i = self._indices[key]
        shapes = self.hash_table['shape']
        return Slice(*shapes[i])

    def __setitem__(self, key, value):
        """Map hash *key* to raw-data slice *value* (step-1, nonempty)."""
        if isinstance(key, np.ndarray):
            key = key.tobytes()
        if not isinstance(key, bytes):
            raise TypeError(f"key must be bytes, got {type(key)}")
        if len(key) != self.hash_size:
            raise ValueError("key must be %d bytes" % self.hash_size)
        if isinstance(value, Tuple):
            # Bug fix: error message was garbled ("Chunking in more other
            # than the first dimension").
            if len(value.args) > 1:
                raise NotImplementedError(
                    "Chunking in dimensions other than the first is not supported")
            value = value.args[0]
        if not isinstance(value, (slice, Slice)):
            raise TypeError("value must be a slice object")
        value = Slice(value)
        # Empty slices are silently ignored.
        if value.isempty():
            return
        if value.step not in [1, None]:
            raise ValueError("only step-1 slices are supported")

        kv = (list(key), (value.start, value.stop))
        if key in self._indices:
            # Sanity check: the hash stored at this row must equal the key.
            # Bug fix: this used to compare bytes(record)[0] — the first
            # *byte* of the whole record, an int — against the bytes key, so
            # it was always unequal and re-setting an existing key always
            # raised; the ValueError message also had an unfilled %s.
            stored_hash = self.hash_table[self._indices[key]]['hash'].tobytes()
            if stored_hash != key:
                raise ValueError(
                    f"The key {key!r} is already in the hashtable under another index.")
            self.hash_table[self._indices[key]] = kv
        else:
            if self.largest_index >= self.hash_table.shape[0]:
                # Grow the in-memory table by one chunk of rows.
                newshape = (self.hash_table.shape[0] + self.chunk_size,)
                new_hash_table = np.zeros(newshape, dtype=self.hash_table.dtype)
                new_hash_table[:self.hash_table.shape[0]] = self.hash_table
                self.hash_table = new_hash_table
            self.hash_table[self.largest_index] = kv
            self._indices[key] = self.largest_index
            self.largest_index += 1

    def __delitem__(self, key):
        # Entries are never removed; raw data is append-only.
        raise NotImplementedError

    def __iter__(self):
        return iter(self._indices)

    def __len__(self):
        return len(self._indices)
| {"/versioned_hdf5/replay.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/slicetools.py", "/versioned_hdf5/hashtable.py"], "/analysis/generate_data.py": ["/utils/__init__.py", "/versioned_hdf5/api.py"], "/versioned_hdf5/__init__.py": ["/versioned_hdf5/api.py", "/versioned_hdf5/replay.py"], "/benchmarks/resize.py": ["/versioned_hdf5/__init__.py"], "/versioned_hdf5/tests/test_replay.py": ["/versioned_hdf5/replay.py", "/versioned_hdf5/hashtable.py"], "/versioned_hdf5/api.py": ["/versioned_hdf5/hashtable.py"], "/benchmarks/many_chunks.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/versionedhdf5file.py": ["/versioned_hdf5/__init__.py"], "/benchmarks/delete_versions.py": ["/versioned_hdf5/__init__.py", "/versioned_hdf5/replay.py"], "/versioned_hdf5/tests/test_hashtable.py": ["/versioned_hdf5/hashtable.py", "/versioned_hdf5/tests/helpers.py", "/versioned_hdf5/__init__.py"], "/analysis/performance_tests.py": ["/versioned_hdf5/__init__.py"]} |
62,243 | Jdale28/code-challenge | refs/heads/master | /main/migrations/0001_initial.py | # Generated by Django 2.1.5 on 2019-02-06 18:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Codelines and Snippets
    # tables; each Snippets row belongs to a Codelines row via a cascading FK.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Codelines',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='Snippets',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('snippet', models.CharField(max_length=255)),
                # related_name is later changed to 'snippets' by migration 0003.
                ('codelines', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='codelines', to='main.Codelines')),
            ],
        ),
    ]
| {"/main/admin.py": ["/main/models.py"], "/main/views.py": ["/main/serializers.py", "/main/models.py"], "/main/serializers.py": ["/main/models.py"]} |
62,244 | Jdale28/code-challenge | refs/heads/master | /main/admin.py | from django.contrib import admin
from main.models import Codelines, Snippets
# Expose both models in the Django admin with the default ModelAdmin options.
admin.site.register([Codelines, Snippets])
62,245 | Jdale28/code-challenge | refs/heads/master | /main/migrations/0003_auto_20190220_2201.py | # Generated by Django 2.1.7 on 2019-02-20 22:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated: fixes the FK's reverse accessor so that
    # codelines_instance.snippets returns its Snippets rows.

    dependencies = [
        ('main', '0002_auto_20190220_2143'),
    ]

    operations = [
        migrations.AlterField(
            model_name='snippets',
            name='codelines',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='snippets', to='main.Codelines'),
        ),
    ]
| {"/main/admin.py": ["/main/models.py"], "/main/views.py": ["/main/serializers.py", "/main/models.py"], "/main/serializers.py": ["/main/models.py"]} |
62,246 | Jdale28/code-challenge | refs/heads/master | /main/migrations/0002_auto_20190220_2143.py | # Generated by Django 2.1.7 on 2019-02-20 21:43
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated: renames the Snippets.snippet column to 'snippets' to
    # match the model definition.

    dependencies = [
        ('main', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='snippets',
            old_name='snippet',
            new_name='snippets',
        ),
    ]
| {"/main/admin.py": ["/main/models.py"], "/main/views.py": ["/main/serializers.py", "/main/models.py"], "/main/serializers.py": ["/main/models.py"]} |
62,247 | Jdale28/code-challenge | refs/heads/master | /main/models.py | from django.db import models
class Codelines(models.Model):
    """A named collection of code snippets."""
    name = models.CharField(max_length=255)

    def __str__(self):
        """Human-readable representation (used e.g. by the admin)."""
        return self.name
class Snippets(models.Model):
    """One snippet of code belonging to a Codelines collection."""
    snippets = models.CharField(max_length=255)
    # Deleting the parent Codelines cascades to its snippets; the reverse
    # accessor is codelines_instance.snippets.
    codelines = models.ForeignKey(Codelines, on_delete=models.CASCADE, related_name='snippets')

    def __str__(self):
        """Return the snippet text."""
        return self.snippets
62,248 | Jdale28/code-challenge | refs/heads/master | /main/views.py | from django.shortcuts import render
from rest_framework import viewsets
from .serializers import CodelinesSerializer, SnippetsSerializer
from .models import Codelines, Snippets
class CodelinesView(viewsets.ModelViewSet):
    """CRUD API endpoint for Codelines."""
    # NOTE(review): an empty authentication_classes disables authentication
    # on this endpoint — confirm this is intentional before deploying.
    authentication_classes = []
    queryset = Codelines.objects.all()
    serializer_class = CodelinesSerializer
class SnippetsView(viewsets.ModelViewSet):
    """CRUD API endpoint for Snippets."""
    # NOTE(review): authentication disabled here as well — confirm intended.
    authentication_classes = []
    queryset = Snippets.objects.all()
    serializer_class = SnippetsSerializer
62,249 | Jdale28/code-challenge | refs/heads/master | /main/urls.py | from django.urls import path, include
from rest_framework import routers
from . import views
# DRF router: exposes /codelines/ and /snippets/ CRUD endpoints at the root.
router = routers.DefaultRouter()
router.register('codelines', views.CodelinesView)
router.register('snippets', views.SnippetsView)

urlpatterns = [
    path('', include(router.urls))
]
62,250 | Jdale28/code-challenge | refs/heads/master | /main/serializers.py | from rest_framework import serializers
from .models import Codelines, Snippets
class SnippetsSerializer(serializers.ModelSerializer):
    """Flat serializer for Snippets; the parent FK is exposed by id."""
    class Meta:
        model = Snippets
        fields = ('id', 'snippets', 'codelines')
class CodelinesSerializer(serializers.ModelSerializer):
    """Codelines with its related snippets nested, read-only (populated via
    the 'snippets' related_name on Snippets.codelines)."""
    snippets = SnippetsSerializer(many=True, read_only=True)

    class Meta:
        model = Codelines
        fields = ('id', 'name', 'snippets')
62,272 | zakx/InvenTree | refs/heads/master | /InvenTree/project/views.py | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
def index(request):
    """Placeholder Projects index view; returns a static plain-text page."""
    return HttpResponse("This is the Projects page")
| {"/InvenTree/track/admin.py": ["/InvenTree/track/models.py"], "/InvenTree/part/serializers.py": ["/InvenTree/part/models.py"], "/InvenTree/project/admin.py": ["/InvenTree/project/models.py"], "/InvenTree/part/admin.py": ["/InvenTree/part/models.py"], "/InvenTree/stock/views.py": ["/InvenTree/stock/models.py"], "/InvenTree/supplier/views.py": ["/InvenTree/supplier/models.py"], "/InvenTree/part/views.py": ["/InvenTree/part/models.py", "/InvenTree/part/serializers.py"], "/InvenTree/supplier/admin.py": ["/InvenTree/supplier/models.py"], "/InvenTree/stock/admin.py": ["/InvenTree/stock/models.py"]} |
62,273 | zakx/InvenTree | refs/heads/master | /pep_check.py | """
Checks all source files (.py) against PEP8 coding style.
The following rules are ignored:
- W293 - blank lines contain whitespace
- E501 - line too long (82 characters)
Run this script before submitting a Pull-Request to check your code.
"""
import subprocess
# Run the pep8 checker over the InvenTree package; see the module docstring
# for why W293 and E501 are ignored.
subprocess.call(['pep8', '--exclude=migrations', '--ignore=W293,E501', 'InvenTree'])
| {"/InvenTree/track/admin.py": ["/InvenTree/track/models.py"], "/InvenTree/part/serializers.py": ["/InvenTree/part/models.py"], "/InvenTree/project/admin.py": ["/InvenTree/project/models.py"], "/InvenTree/part/admin.py": ["/InvenTree/part/models.py"], "/InvenTree/stock/views.py": ["/InvenTree/stock/models.py"], "/InvenTree/supplier/views.py": ["/InvenTree/supplier/models.py"], "/InvenTree/part/views.py": ["/InvenTree/part/models.py", "/InvenTree/part/serializers.py"], "/InvenTree/supplier/admin.py": ["/InvenTree/supplier/models.py"], "/InvenTree/stock/admin.py": ["/InvenTree/stock/models.py"]} |
62,274 | zakx/InvenTree | refs/heads/master | /InvenTree/track/admin.py | from django.contrib import admin
from .models import UniquePart
class UniquePartAdmin(admin.ModelAdmin):
    # Columns shown in the admin changelist for serialized parts.
    list_display = ('part', 'revision', 'serial', 'status', 'creation_date')

admin.site.register(UniquePart, UniquePartAdmin)
| {"/InvenTree/track/admin.py": ["/InvenTree/track/models.py"], "/InvenTree/part/serializers.py": ["/InvenTree/part/models.py"], "/InvenTree/project/admin.py": ["/InvenTree/project/models.py"], "/InvenTree/part/admin.py": ["/InvenTree/part/models.py"], "/InvenTree/stock/views.py": ["/InvenTree/stock/models.py"], "/InvenTree/supplier/views.py": ["/InvenTree/supplier/models.py"], "/InvenTree/part/views.py": ["/InvenTree/part/models.py", "/InvenTree/part/serializers.py"], "/InvenTree/supplier/admin.py": ["/InvenTree/supplier/models.py"], "/InvenTree/stock/admin.py": ["/InvenTree/stock/models.py"]} |
62,275 | zakx/InvenTree | refs/heads/master | /install.py | from __future__ import print_function
import subprocess
import argparse
def manage(*arg):
    """Run an InvenTree management command (InvenTree/manage.py).

    Uses the interpreter executing this script rather than a bare "python",
    which may be missing (python3-only systems) or point at a different
    installation.
    """
    import sys  # local import keeps this file's top-level imports unchanged
    subprocess.call([sys.executable, "InvenTree/manage.py", *arg])
# Command-line interface: -u/--update skips the pip installs and only
# re-runs migrations and checks.
parser = argparse.ArgumentParser(description="Install InvenTree inventory management system")
parser.add_argument('-u', '--update', help='Update only, do not try to install required components', action='store_true')

args = parser.parse_args()

# If 'update' is specified, don't perform initial installation
if not args.update:
    # Install django requirements
    subprocess.call(["pip", "install", "django", "-q"])
    subprocess.call(["pip", "install", "djangorestframework", "-q"])
    # Initial database setup
    manage("migrate")

# Make migrations for all apps
manage("makemigrations", "part")
manage("makemigrations", "stock")
manage("makemigrations", "supplier")
manage("makemigrations", "project")
manage("makemigrations", "track")

# Update the database
manage("migrate")

# Check for errors
manage("check")

if not args.update:
    print("\n\nAdmin account:\nIf a superuser is not already installed,")
    print("run the command 'python InvenTree/manage.py createsuperuser'")
62,276 | zakx/InvenTree | refs/heads/master | /InvenTree/stock/models.py | from __future__ import unicode_literals
from django.utils.translation import ugettext as _
from django.db import models
from part.models import Part
from InvenTree.models import InvenTreeTree
class Warehouse(InvenTreeTree):
    """ A physical location in which stock is stored.

    Hierarchical tree behaviour (parent / path / name) is inherited
    from InvenTreeTree.
    """
    pass


class StockItem(models.Model):
    """ A quantity of a particular Part held at a particular Warehouse. """

    # Part this stock record refers to; reverse accessor is Part.locations
    part = models.ForeignKey(Part,
                             on_delete=models.CASCADE,
                             related_name='locations')

    # Warehouse in which the stock is held
    location = models.ForeignKey(Warehouse, on_delete=models.CASCADE)

    # Number of units on hand
    quantity = models.PositiveIntegerField()

    # Automatically refreshed every time the record is saved
    updated = models.DateField(auto_now=True)

    # last time the stock was checked / counted
    last_checked = models.DateField(blank=True, null=True)

    # Flag this record for manual review
    review_needed = models.BooleanField(default=False)

    # Stock status types
    ITEM_IN_STOCK = 10
    ITEM_INCOMING = 15
    ITEM_IN_PROGRESS = 20
    ITEM_COMPLETE = 25
    ITEM_ATTENTION = 50
    ITEM_DAMAGED = 55
    ITEM_DESTROYED = 60

    # Mapping of status code -> translated human-readable label
    ITEM_STATUS_CODES = {
        ITEM_IN_STOCK: _("In stock"),
        ITEM_INCOMING: _("Incoming"),
        ITEM_IN_PROGRESS: _("In progress"),
        ITEM_COMPLETE: _("Complete"),
        ITEM_ATTENTION: _("Attention needed"),
        ITEM_DAMAGED: _("Damaged"),
        ITEM_DESTROYED: _("Destroyed")
    }

    status = models.PositiveIntegerField(
        default=ITEM_IN_STOCK,
        choices=ITEM_STATUS_CODES.items())

    # If stock item is incoming, an (optional) ETA field
    expected_arrival = models.DateField(null=True, blank=True)

    def __str__(self):
        # e.g. "25 x Resistor @ Main Warehouse"
        return "{n} x {part} @ {loc}".format(
            n=self.quantity,
            part=self.part.name,
            loc=self.location.name)
| {"/InvenTree/track/admin.py": ["/InvenTree/track/models.py"], "/InvenTree/part/serializers.py": ["/InvenTree/part/models.py"], "/InvenTree/project/admin.py": ["/InvenTree/project/models.py"], "/InvenTree/part/admin.py": ["/InvenTree/part/models.py"], "/InvenTree/stock/views.py": ["/InvenTree/stock/models.py"], "/InvenTree/supplier/views.py": ["/InvenTree/supplier/models.py"], "/InvenTree/part/views.py": ["/InvenTree/part/models.py", "/InvenTree/part/serializers.py"], "/InvenTree/supplier/admin.py": ["/InvenTree/supplier/models.py"], "/InvenTree/stock/admin.py": ["/InvenTree/stock/models.py"]} |
62,277 | zakx/InvenTree | refs/heads/master | /InvenTree/track/models.py | from __future__ import unicode_literals
from django.utils.translation import ugettext as _
from django.db import models
from django.contrib.auth.models import User
from supplier.models import Customer
from part.models import Part, PartRevision
class UniquePart(models.Model):
    """ A unique instance of a Part object.
    Used for tracking parts based on serial numbers,
    and tracking all events in the life of a part
    """

    part = models.ForeignKey(Part, on_delete=models.CASCADE)

    # Optional link to the revision this unit was built to
    revision = models.ForeignKey(PartRevision,
                                 on_delete=models.CASCADE,
                                 blank=True,
                                 null=True)

    # Set once when the record is created, then read-only
    creation_date = models.DateField(auto_now_add=True,
                                     editable=False)

    # Serial number of this particular unit
    serial = models.IntegerField()

    # NOTE(review): no on_delete given (pre-Django-2.0 style); defaults to
    # CASCADE on the Django versions this code targets.
    createdBy = models.ForeignKey(User)

    # Customer this unit was shipped to, if any
    customer = models.ForeignKey(Customer, blank=True, null=True)

    # Part status types
    PART_IN_PROGRESS = 0
    PART_IN_STOCK = 10
    PART_SHIPPED = 20
    PART_RETURNED = 30
    PART_DAMAGED = 40
    PART_DESTROYED = 50

    # Mapping of status code -> translated human-readable label
    PART_STATUS_CODES = {
        PART_IN_PROGRESS: _("In progress"),
        PART_IN_STOCK: _("In stock"),
        PART_SHIPPED: _("Shipped"),
        PART_RETURNED: _("Returned"),
        PART_DAMAGED: _("Damaged"),
        PART_DESTROYED: _("Destroyed")
    }

    status = models.IntegerField(default=PART_IN_PROGRESS, choices=PART_STATUS_CODES.items())

    def __str__(self):
        return self.part.name


class PartTrackingInfo(models.Model):
    """ Single data-point in the life of a UniquePart
    Each time something happens to the UniquePart,
    a new PartTrackingInfo object should be created.
    """

    part = models.ForeignKey(UniquePart, on_delete=models.CASCADE)

    # Timestamped automatically at creation, then read-only
    date = models.DateField(auto_now_add=True,
                            editable=False)

    # Free-text description of the event
    notes = models.CharField(max_length=500)
| {"/InvenTree/track/admin.py": ["/InvenTree/track/models.py"], "/InvenTree/part/serializers.py": ["/InvenTree/part/models.py"], "/InvenTree/project/admin.py": ["/InvenTree/project/models.py"], "/InvenTree/part/admin.py": ["/InvenTree/part/models.py"], "/InvenTree/stock/views.py": ["/InvenTree/stock/models.py"], "/InvenTree/supplier/views.py": ["/InvenTree/supplier/models.py"], "/InvenTree/part/views.py": ["/InvenTree/part/models.py", "/InvenTree/part/serializers.py"], "/InvenTree/supplier/admin.py": ["/InvenTree/supplier/models.py"], "/InvenTree/stock/admin.py": ["/InvenTree/stock/models.py"]} |
62,278 | zakx/InvenTree | refs/heads/master | /InvenTree/part/serializers.py | from rest_framework import serializers
from .models import Part, PartCategory, PartParameter
class ParameterSerializer(serializers.ModelSerializer):
    """Serializes a PartParameter. 'name' and 'units' are proxy
    properties that read through to the parameter's template."""

    class Meta:
        model = PartParameter
        fields = ('name',
                  'value',
                  'units')


class PartSerializer(serializers.ModelSerializer):
    """Serializes a Part, including its nested parameter list and the
    computed total 'stock' property."""

    # 'parameters' is the related_name on PartParameter.part
    params = ParameterSerializer(source='parameters', many=True)

    class Meta:
        model = Part
        fields = ('pk',
                  'name',
                  'IPN',
                  'description',
                  'category',
                  'stock',
                  'params')


class PartCategorySerializer(serializers.ModelSerializer):
    """Serializes a PartCategory, including its tree position
    (parent / path)."""

    class Meta:
        model = PartCategory
        fields = ('pk',
                  'name',
                  'description',
                  'parent',
                  'path')
| {"/InvenTree/track/admin.py": ["/InvenTree/track/models.py"], "/InvenTree/part/serializers.py": ["/InvenTree/part/models.py"], "/InvenTree/project/admin.py": ["/InvenTree/project/models.py"], "/InvenTree/part/admin.py": ["/InvenTree/part/models.py"], "/InvenTree/stock/views.py": ["/InvenTree/stock/models.py"], "/InvenTree/supplier/views.py": ["/InvenTree/supplier/models.py"], "/InvenTree/part/views.py": ["/InvenTree/part/models.py", "/InvenTree/part/serializers.py"], "/InvenTree/supplier/admin.py": ["/InvenTree/supplier/models.py"], "/InvenTree/stock/admin.py": ["/InvenTree/stock/models.py"]} |
62,279 | zakx/InvenTree | refs/heads/master | /InvenTree/project/admin.py | from django.contrib import admin
from .models import ProjectCategory, Project, ProjectPart, ProjectRun
class ProjectCategoryAdmin(admin.ModelAdmin):
    """Admin list configuration for ProjectCategory."""
    list_display = ('name', 'path', 'description')


class ProjectAdmin(admin.ModelAdmin):
    """Admin list configuration for Project."""
    list_display = ('name', 'description', 'category')


class ProjectPartAdmin(admin.ModelAdmin):
    """Admin list configuration for ProjectPart (part usage in a project)."""
    list_display = ('part', 'project', 'quantity')


class ProjectRunAdmin(admin.ModelAdmin):
    """Admin list configuration for ProjectRun."""
    list_display = ('project', 'quantity', 'run_date')


# Register all project-related models with the admin site
admin.site.register(ProjectCategory, ProjectCategoryAdmin)
admin.site.register(Project, ProjectAdmin)
admin.site.register(ProjectPart, ProjectPartAdmin)
admin.site.register(ProjectRun, ProjectRunAdmin)
| {"/InvenTree/track/admin.py": ["/InvenTree/track/models.py"], "/InvenTree/part/serializers.py": ["/InvenTree/part/models.py"], "/InvenTree/project/admin.py": ["/InvenTree/project/models.py"], "/InvenTree/part/admin.py": ["/InvenTree/part/models.py"], "/InvenTree/stock/views.py": ["/InvenTree/stock/models.py"], "/InvenTree/supplier/views.py": ["/InvenTree/supplier/models.py"], "/InvenTree/part/views.py": ["/InvenTree/part/models.py", "/InvenTree/part/serializers.py"], "/InvenTree/supplier/admin.py": ["/InvenTree/supplier/models.py"], "/InvenTree/stock/admin.py": ["/InvenTree/stock/models.py"]} |
62,280 | zakx/InvenTree | refs/heads/master | /InvenTree/part/admin.py | from django.contrib import admin
from .models import PartCategory, Part, PartParameter, PartParameterTemplate, CategoryParameterLink
class PartAdmin(admin.ModelAdmin):
    """Admin list configuration for Part ('stock' is the computed total)."""
    list_display = ('name', 'IPN', 'stock', 'category')


class PartCategoryAdmin(admin.ModelAdmin):
    """Admin list configuration for PartCategory."""
    list_display = ('name', 'path', 'description')


class ParameterTemplateAdmin(admin.ModelAdmin):
    """Admin list configuration for PartParameterTemplate."""
    list_display = ('name', 'units', 'format')


class ParameterAdmin(admin.ModelAdmin):
    """Admin list configuration for PartParameter."""
    list_display = ('part', 'template', 'value')


# Register the part models; CategoryParameterLink uses the default admin
admin.site.register(Part, PartAdmin)
admin.site.register(PartCategory, PartCategoryAdmin)
admin.site.register(PartParameter, ParameterAdmin)
admin.site.register(PartParameterTemplate, ParameterTemplateAdmin)
admin.site.register(CategoryParameterLink)
| {"/InvenTree/track/admin.py": ["/InvenTree/track/models.py"], "/InvenTree/part/serializers.py": ["/InvenTree/part/models.py"], "/InvenTree/project/admin.py": ["/InvenTree/project/models.py"], "/InvenTree/part/admin.py": ["/InvenTree/part/models.py"], "/InvenTree/stock/views.py": ["/InvenTree/stock/models.py"], "/InvenTree/supplier/views.py": ["/InvenTree/supplier/models.py"], "/InvenTree/part/views.py": ["/InvenTree/part/models.py", "/InvenTree/part/serializers.py"], "/InvenTree/supplier/admin.py": ["/InvenTree/supplier/models.py"], "/InvenTree/stock/admin.py": ["/InvenTree/stock/models.py"]} |
62,281 | zakx/InvenTree | refs/heads/master | /InvenTree/track/views.py | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
def index(request):
    """Placeholder landing page for the tracking app."""
    return HttpResponse("This is the Tracking page")
| {"/InvenTree/track/admin.py": ["/InvenTree/track/models.py"], "/InvenTree/part/serializers.py": ["/InvenTree/part/models.py"], "/InvenTree/project/admin.py": ["/InvenTree/project/models.py"], "/InvenTree/part/admin.py": ["/InvenTree/part/models.py"], "/InvenTree/stock/views.py": ["/InvenTree/stock/models.py"], "/InvenTree/supplier/views.py": ["/InvenTree/supplier/models.py"], "/InvenTree/part/views.py": ["/InvenTree/part/models.py", "/InvenTree/part/serializers.py"], "/InvenTree/supplier/admin.py": ["/InvenTree/supplier/models.py"], "/InvenTree/stock/admin.py": ["/InvenTree/stock/models.py"]} |
62,282 | zakx/InvenTree | refs/heads/master | /InvenTree/part/urls.py | from django.conf.urls import url
from . import views
urlpatterns = [
    # Detail view for a single part, e.g. /1/
    url(r'^(?P<pk>[0-9]+)/$', views.PartDetail.as_view()),

    # Detail view for a single part category, e.g. /category/1/
    url(r'^category/(?P<pk>[0-9]+)/$', views.PartCategoryDetail.as_view()),

    # List of all part categories (PartCategoryList queries all, not just top-level)
    url(r'^category/$', views.PartCategoryList.as_view()),

    # List of all parts
    url(r'^$', views.PartList.as_view())
]
| {"/InvenTree/track/admin.py": ["/InvenTree/track/models.py"], "/InvenTree/part/serializers.py": ["/InvenTree/part/models.py"], "/InvenTree/project/admin.py": ["/InvenTree/project/models.py"], "/InvenTree/part/admin.py": ["/InvenTree/part/models.py"], "/InvenTree/stock/views.py": ["/InvenTree/stock/models.py"], "/InvenTree/supplier/views.py": ["/InvenTree/supplier/models.py"], "/InvenTree/part/views.py": ["/InvenTree/part/models.py", "/InvenTree/part/serializers.py"], "/InvenTree/supplier/admin.py": ["/InvenTree/supplier/models.py"], "/InvenTree/stock/admin.py": ["/InvenTree/stock/models.py"]} |
62,283 | zakx/InvenTree | refs/heads/master | /InvenTree/part/models.py | from __future__ import unicode_literals
from django.utils.translation import ugettext as _
from django.db import models
from django.db.models import Sum
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from InvenTree.models import InvenTreeTree
class PartCategory(InvenTreeTree):
    """ PartCategory provides hierarchical organization of Part objects.

    Tree behaviour is inherited from InvenTreeTree; every Part must
    belong to exactly one category.
    """

    class Meta:
        verbose_name = "Part Category"
        verbose_name_plural = "Part Categories"
class Part(models.Model):
    """ Represents an abstract part type (not a physical unit).

    Physical stock of a part is tracked by the related StockItem
    records (reverse accessor 'locations').
    """

    # Short name of the part
    name = models.CharField(max_length=100)

    # Longer description of the part (optional)
    description = models.CharField(max_length=250, blank=True)

    # Internal Part Number (optional)
    IPN = models.CharField(max_length=100, blank=True)

    # Part category - all parts must be assigned to a category
    category = models.ForeignKey(PartCategory, on_delete=models.CASCADE)

    # Minimum "allowed" stock level
    minimum_stock = models.PositiveIntegerField(default=0)

    # Units of quantity for this part. Default is "pcs"
    units = models.CharField(max_length=20, default="pcs", blank=True)

    # Is this part "trackable"?
    # Trackable parts can have unique instances which are assigned serial numbers
    # and can have their movements tracked
    trackable = models.BooleanField(default=False)

    def __str__(self):
        """ Display as 'name (IPN)' when an IPN is set, else just the name. """
        if self.IPN:
            return "{name} ({ipn})".format(
                ipn=self.IPN,
                name=self.name)
        else:
            return self.name

    class Meta:
        verbose_name = "Part"
        verbose_name_plural = "Parts"

    @property
    def stock(self):
        """ Return the total stock quantity for this part.

        Part may be stored in multiple locations; returns 0 when there
        are no stock records.
        """
        # Sum in the database instead of loading every StockItem row
        # just to test for emptiness (the aggregate yields None when
        # there are no rows).
        total = self.locations.aggregate(total=Sum('quantity'))['total']
        return total if total is not None else 0

    @property
    def projects(self):
        """ Return a list of unique projects that this part is associated with.
        A part may be used in zero or more projects.
        """
        project_ids = set()
        projects = []

        for pp in self.projectpart_set.all():
            # De-duplicate: a part can appear in a project multiple times
            if pp.project.id not in project_ids:
                project_ids.add(pp.project.id)
                projects.append(pp.project)

        return projects
class PartParameterTemplate(models.Model):
    """ A PartParameterTemplate pre-defines a parameter field,
    ready to be copied for use with a given Part.
    A PartParameterTemplate can be optionally associated with a PartCategory
    (via CategoryParameterLink).
    """

    name = models.CharField(max_length=20)
    description = models.CharField(max_length=100, blank=True)
    units = models.CharField(max_length=10, blank=True)

    # Suggested values to pre-fill when a parameter is created from this template
    default_value = models.CharField(max_length=50, blank=True)
    default_min = models.CharField(max_length=50, blank=True)
    default_max = models.CharField(max_length=50, blank=True)

    # Parameter format
    PARAM_NUMERIC = 10
    PARAM_TEXT = 20
    PARAM_BOOL = 30

    # Mapping of format code -> translated human-readable label
    PARAM_TYPE_CODES = {
        PARAM_NUMERIC: _("Numeric"),
        PARAM_TEXT: _("Text"),
        PARAM_BOOL: _("Bool")
    }

    format = models.IntegerField(
        default=PARAM_NUMERIC,
        choices=PARAM_TYPE_CODES.items())

    def __str__(self):
        # e.g. "Resistance (ohm)"
        return "{name} ({units})".format(
            name=self.name,
            units=self.units)

    class Meta:
        verbose_name = "Parameter Template"
        verbose_name_plural = "Parameter Templates"


class CategoryParameterLink(models.Model):
    """ Links a PartParameterTemplate to a PartCategory
    """

    category = models.ForeignKey(PartCategory, on_delete=models.CASCADE)
    template = models.ForeignKey(PartParameterTemplate, on_delete=models.CASCADE)

    def __str__(self):
        return "{name} - {cat}".format(
            name=self.template.name,
            cat=self.category)

    class Meta:
        verbose_name = "Category Parameter"
        verbose_name_plural = "Category Parameters"


class PartParameter(models.Model):
    """ PartParameter is associated with a single part.

    Each (part, template) pair is intended to be unique -- see save().
    """

    part = models.ForeignKey(Part, on_delete=models.CASCADE, related_name='parameters')

    # NOTE(review): no on_delete given (pre-Django-2.0 style)
    template = models.ForeignKey(PartParameterTemplate)

    # Value data
    value = models.CharField(max_length=50, blank=True)
    min_value = models.CharField(max_length=50, blank=True)
    max_value = models.CharField(max_length=50, blank=True)

    # Prevent multiple parameters of the same template
    # from being added to the same part
    def save(self, *args, **kwargs):
        # NOTE(review): a duplicate save is *silently* discarded; callers
        # get no indication the save was skipped (ValidationError is
        # imported at module level but unused -- possibly intended here).
        params = PartParameter.objects.filter(part=self.part, template=self.template)
        if len(params) > 1:
            return
        if len(params) == 1 and params[0].id != self.id:
            return
        super(PartParameter, self).save(*args, **kwargs)

    def __str__(self):
        return "{name} : {val}{units}".format(
            name=self.template.name,
            val=self.value,
            units=self.template.units)

    # Convenience proxies reading through to the template

    @property
    def units(self):
        return self.template.units

    @property
    def name(self):
        return self.template.name

    class Meta:
        verbose_name = "Part Parameter"
        verbose_name_plural = "Part Parameters"


class PartRevision(models.Model):
    """ A PartRevision represents a change-notification to a Part
    A Part may go through several revisions in its lifetime,
    which should be tracked.
    UniqueParts can have a single associated PartRevision
    """

    part = models.ForeignKey(Part, on_delete=models.CASCADE)
    name = models.CharField(max_length=100)
    description = models.CharField(max_length=500)
    revision_date = models.DateField(auto_now_add=True)

    def __str__(self):
        return self.name
| {"/InvenTree/track/admin.py": ["/InvenTree/track/models.py"], "/InvenTree/part/serializers.py": ["/InvenTree/part/models.py"], "/InvenTree/project/admin.py": ["/InvenTree/project/models.py"], "/InvenTree/part/admin.py": ["/InvenTree/part/models.py"], "/InvenTree/stock/views.py": ["/InvenTree/stock/models.py"], "/InvenTree/supplier/views.py": ["/InvenTree/supplier/models.py"], "/InvenTree/part/views.py": ["/InvenTree/part/models.py", "/InvenTree/part/serializers.py"], "/InvenTree/supplier/admin.py": ["/InvenTree/supplier/models.py"], "/InvenTree/stock/admin.py": ["/InvenTree/stock/models.py"]} |
62,284 | zakx/InvenTree | refs/heads/master | /InvenTree/stock/views.py | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from .models import Warehouse, StockItem
def index(request):
    """Render the stock index page, listing the top-level warehouses."""
    # Top-level warehouses are those with no parent
    warehouses = Warehouse.objects.filter(parent=None)
    return render(request, 'stock/index.html', {'warehouses': warehouses})
| {"/InvenTree/track/admin.py": ["/InvenTree/track/models.py"], "/InvenTree/part/serializers.py": ["/InvenTree/part/models.py"], "/InvenTree/project/admin.py": ["/InvenTree/project/models.py"], "/InvenTree/part/admin.py": ["/InvenTree/part/models.py"], "/InvenTree/stock/views.py": ["/InvenTree/stock/models.py"], "/InvenTree/supplier/views.py": ["/InvenTree/supplier/models.py"], "/InvenTree/part/views.py": ["/InvenTree/part/models.py", "/InvenTree/part/serializers.py"], "/InvenTree/supplier/admin.py": ["/InvenTree/supplier/models.py"], "/InvenTree/stock/admin.py": ["/InvenTree/stock/models.py"]} |
62,285 | zakx/InvenTree | refs/heads/master | /InvenTree/supplier/views.py | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from .models import Supplier
def index(request):
    """Placeholder index page for the supplier app."""
    return HttpResponse("This is the suppliers page")


def supplierDetail(request, supplier_id):
    """Render the detail page for a single supplier (404 if not found)."""
    supplier = get_object_or_404(Supplier, pk=supplier_id)
    return render(request, 'supplier/detail.html',
                  {'supplier': supplier})
| {"/InvenTree/track/admin.py": ["/InvenTree/track/models.py"], "/InvenTree/part/serializers.py": ["/InvenTree/part/models.py"], "/InvenTree/project/admin.py": ["/InvenTree/project/models.py"], "/InvenTree/part/admin.py": ["/InvenTree/part/models.py"], "/InvenTree/stock/views.py": ["/InvenTree/stock/models.py"], "/InvenTree/supplier/views.py": ["/InvenTree/supplier/models.py"], "/InvenTree/part/views.py": ["/InvenTree/part/models.py", "/InvenTree/part/serializers.py"], "/InvenTree/supplier/admin.py": ["/InvenTree/supplier/models.py"], "/InvenTree/stock/admin.py": ["/InvenTree/stock/models.py"]} |
62,286 | zakx/InvenTree | refs/heads/master | /InvenTree/part/views.py | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, Http404
from rest_framework import generics
from .models import PartCategory, Part
from .serializers import PartSerializer, PartCategorySerializer
def index(request):
    """Placeholder landing page for the part app."""
    return HttpResponse("Hello world. This is the parts page")


class PartDetail(generics.RetrieveAPIView):
    """REST endpoint returning a single Part."""
    queryset = Part.objects.all()
    serializer_class = PartSerializer


class PartList(generics.ListAPIView):
    """REST endpoint returning all Parts."""
    queryset = Part.objects.all()
    serializer_class = PartSerializer


class PartCategoryDetail(generics.RetrieveAPIView):
    """REST endpoint returning a single PartCategory."""
    queryset = PartCategory.objects.all()
    serializer_class = PartCategorySerializer


class PartCategoryList(generics.ListAPIView):
    """REST endpoint returning all PartCategories."""
    queryset = PartCategory.objects.all()
    serializer_class = PartCategorySerializer
| {"/InvenTree/track/admin.py": ["/InvenTree/track/models.py"], "/InvenTree/part/serializers.py": ["/InvenTree/part/models.py"], "/InvenTree/project/admin.py": ["/InvenTree/project/models.py"], "/InvenTree/part/admin.py": ["/InvenTree/part/models.py"], "/InvenTree/stock/views.py": ["/InvenTree/stock/models.py"], "/InvenTree/supplier/views.py": ["/InvenTree/supplier/models.py"], "/InvenTree/part/views.py": ["/InvenTree/part/models.py", "/InvenTree/part/serializers.py"], "/InvenTree/supplier/admin.py": ["/InvenTree/supplier/models.py"], "/InvenTree/stock/admin.py": ["/InvenTree/stock/models.py"]} |
62,287 | zakx/InvenTree | refs/heads/master | /InvenTree/supplier/admin.py | from django.contrib import admin
from .models import Supplier, SupplierPart, Customer, Manufacturer
class CompanyAdmin(admin.ModelAdmin):
    """Shared admin list configuration for all Company subclasses."""
    list_display = ('name', 'URL', 'contact')


# Customer / Supplier / Manufacturer share the same Company admin;
# SupplierPart uses the default ModelAdmin
admin.site.register(Customer, CompanyAdmin)
admin.site.register(Supplier, CompanyAdmin)
admin.site.register(Manufacturer, CompanyAdmin)
admin.site.register(SupplierPart)
| {"/InvenTree/track/admin.py": ["/InvenTree/track/models.py"], "/InvenTree/part/serializers.py": ["/InvenTree/part/models.py"], "/InvenTree/project/admin.py": ["/InvenTree/project/models.py"], "/InvenTree/part/admin.py": ["/InvenTree/part/models.py"], "/InvenTree/stock/views.py": ["/InvenTree/stock/models.py"], "/InvenTree/supplier/views.py": ["/InvenTree/supplier/models.py"], "/InvenTree/part/views.py": ["/InvenTree/part/models.py", "/InvenTree/part/serializers.py"], "/InvenTree/supplier/admin.py": ["/InvenTree/supplier/models.py"], "/InvenTree/stock/admin.py": ["/InvenTree/stock/models.py"]} |
62,288 | zakx/InvenTree | refs/heads/master | /InvenTree/project/models.py | from __future__ import unicode_literals
from django.utils.translation import ugettext as _
from django.db import models
from InvenTree.models import InvenTreeTree
from part.models import Part
class ProjectCategory(InvenTreeTree):
    """ ProjectCategory provides hierarchical organization of Project objects.
    Each ProjectCategory can contain zero-or-more child categories,
    and in turn can have zero-or-one parent category.
    """

    class Meta:
        verbose_name = "Project Category"
        verbose_name_plural = "Project Categories"


class Project(models.Model):
    """ A Project takes multiple Part objects.
    A project can output zero-or-more Part objects
    """

    name = models.CharField(max_length=100)
    description = models.CharField(max_length=500, blank=True)
    category = models.ForeignKey(ProjectCategory, on_delete=models.CASCADE)

    def __str__(self):
        return self.name

    @property
    def projectParts(self):
        """ Return a list of all project parts associated with this project
        """
        return self.projectpart_set.all()


class ProjectPart(models.Model):
    """ A project part associates a single part with a project
    The quantity of parts required for a single-run of that project is stored.
    The overage is the number of extra parts that are generally used for a single run.
    """

    # Overage types
    OVERAGE_PERCENT = 0
    OVERAGE_ABSOLUTE = 1

    # NOTE(review): 'OVARAGE' is a misspelling of 'OVERAGE'; left unchanged
    # because renaming a class attribute could break external references
    # and recorded migration choices.
    OVARAGE_CODES = {
        OVERAGE_PERCENT: _("Percent"),
        OVERAGE_ABSOLUTE: _("Absolute")
    }

    part = models.ForeignKey(Part, on_delete=models.CASCADE)
    project = models.ForeignKey(Project, on_delete=models.CASCADE)

    # Parts required for a single project run
    quantity = models.PositiveIntegerField(default=1)

    # Extra parts; interpreted as percent or absolute per overage_type
    overage = models.FloatField(default=0)

    # default=1 corresponds to OVERAGE_ABSOLUTE
    overage_type = models.PositiveIntegerField(
        default=1,
        choices=OVARAGE_CODES.items())

    def __str__(self):
        # e.g. "10 x Resistor"
        return "{quan} x {name}".format(
            name=self.part.name,
            quan=self.quantity)


class ProjectRun(models.Model):
    """ A single run of a particular project.
    Tracks the number of 'units' made in the project.
    Provides functionality to update stock,
    based on both:
    a) Parts used (project inputs)
    b) Parts produced (project outputs)
    """

    project = models.ForeignKey(Project, on_delete=models.CASCADE)
    quantity = models.PositiveIntegerField(default=1)
    run_date = models.DateField(auto_now_add=True)
| {"/InvenTree/track/admin.py": ["/InvenTree/track/models.py"], "/InvenTree/part/serializers.py": ["/InvenTree/part/models.py"], "/InvenTree/project/admin.py": ["/InvenTree/project/models.py"], "/InvenTree/part/admin.py": ["/InvenTree/part/models.py"], "/InvenTree/stock/views.py": ["/InvenTree/stock/models.py"], "/InvenTree/supplier/views.py": ["/InvenTree/supplier/models.py"], "/InvenTree/part/views.py": ["/InvenTree/part/models.py", "/InvenTree/part/serializers.py"], "/InvenTree/supplier/admin.py": ["/InvenTree/supplier/models.py"], "/InvenTree/stock/admin.py": ["/InvenTree/stock/models.py"]} |
62,289 | zakx/InvenTree | refs/heads/master | /InvenTree/supplier/urls.py | from django.conf.urls import url
from . import views
urlpatterns = [
    # Display details of a supplier, e.g. /3/
    url(r'^(?P<supplier_id>[0-9]+)/$', views.supplierDetail, name='detail'),

    # Supplier index page
    url(r'^$', views.index, name='index')
]
| {"/InvenTree/track/admin.py": ["/InvenTree/track/models.py"], "/InvenTree/part/serializers.py": ["/InvenTree/part/models.py"], "/InvenTree/project/admin.py": ["/InvenTree/project/models.py"], "/InvenTree/part/admin.py": ["/InvenTree/part/models.py"], "/InvenTree/stock/views.py": ["/InvenTree/stock/models.py"], "/InvenTree/supplier/views.py": ["/InvenTree/supplier/models.py"], "/InvenTree/part/views.py": ["/InvenTree/part/models.py", "/InvenTree/part/serializers.py"], "/InvenTree/supplier/admin.py": ["/InvenTree/supplier/models.py"], "/InvenTree/stock/admin.py": ["/InvenTree/stock/models.py"]} |
62,290 | zakx/InvenTree | refs/heads/master | /InvenTree/supplier/models.py | from __future__ import unicode_literals
from django.db import models
from InvenTree.models import Company
from part.models import Part
class Supplier(Company):
    """ Represents a manufacturer or supplier
    """
    pass


class Manufacturer(Company):
    """ Represents a manufacturer
    """
    pass


class Customer(Company):
    """ Represents a customer
    """
    pass


class SupplierPart(models.Model):
    """ Represents a unique part as provided by a Supplier
    Each SupplierPart is identified by a SKU (the supplier's own part number);
    the manufacturer's MPN may optionally be recorded as well.
    Each SupplierPart is also linked to a Part object
    - A Part may be available from multiple suppliers
    """

    # Internal Part this supplier part maps to (optional)
    part = models.ForeignKey(Part, null=True, blank=True, on_delete=models.CASCADE)
    supplier = models.ForeignKey(Supplier, on_delete=models.CASCADE)

    # Supplier's stock-keeping unit / order code
    SKU = models.CharField(max_length=100)

    manufacturer = models.ForeignKey(Manufacturer, blank=True, null=True, on_delete=models.CASCADE)

    # Manufacturer Part Number (optional)
    MPN = models.CharField(max_length=100, blank=True)

    # Link to the supplier's page for this part
    URL = models.URLField(blank=True)
    description = models.CharField(max_length=250, blank=True)

    # Default price for a single unit
    single_price = models.DecimalField(max_digits=10, decimal_places=3, default=0)

    # Base charge added to order independent of quantity e.g. "Reeling Fee"
    base_cost = models.DecimalField(max_digits=10, decimal_places=3, default=0)

    # packaging that the part is supplied in, e.g. "Reel"
    packaging = models.CharField(max_length=50, blank=True)

    # multiple that the part is provided in
    multiple = models.PositiveIntegerField(default=1)

    # Minimum number required to order
    minimum = models.PositiveIntegerField(default=1)

    # lead time for parts that cannot be delivered immediately
    lead_time = models.DurationField(blank=True, null=True)

    def __str__(self):
        # e.g. "RES-1234 - Digikey"
        return "{sku} - {supplier}".format(
            sku=self.SKU,
            supplier=self.supplier.name)
class SupplierPriceBreak(models.Model):
    """ Represents a quantity price break for a SupplierPart
    - Suppliers can offer discounts at larger quantities
    - SupplierPart(s) may have zero-or-more associated SupplierPriceBreak(s)
    """

    part = models.ForeignKey(SupplierPart, on_delete=models.CASCADE, related_name='price_breaks')
    quantity = models.PositiveIntegerField()
    cost = models.DecimalField(max_digits=10, decimal_places=3)

    def __str__(self):
        # Bug fix: the original referenced the bare name 'part' (NameError)
        # and a non-existent 'self.currency' attribute (AttributeError),
        # so rendering any price break crashed.
        return "{mpn} - {cost} @ {quan}".format(
            mpn=self.part.MPN,
            cost=self.cost,
            quan=self.quantity)
| {"/InvenTree/track/admin.py": ["/InvenTree/track/models.py"], "/InvenTree/part/serializers.py": ["/InvenTree/part/models.py"], "/InvenTree/project/admin.py": ["/InvenTree/project/models.py"], "/InvenTree/part/admin.py": ["/InvenTree/part/models.py"], "/InvenTree/stock/views.py": ["/InvenTree/stock/models.py"], "/InvenTree/supplier/views.py": ["/InvenTree/supplier/models.py"], "/InvenTree/part/views.py": ["/InvenTree/part/models.py", "/InvenTree/part/serializers.py"], "/InvenTree/supplier/admin.py": ["/InvenTree/supplier/models.py"], "/InvenTree/stock/admin.py": ["/InvenTree/stock/models.py"]} |
62,291 | zakx/InvenTree | refs/heads/master | /InvenTree/stock/admin.py | from django.contrib import admin
from .models import Warehouse, StockItem
class WarehouseAdmin(admin.ModelAdmin):
    """Admin list configuration for Warehouse locations."""
    list_display = ('name', 'path', 'description')


class StockItemAdmin(admin.ModelAdmin):
    """Admin list configuration for individual stock records."""
    list_display = ('part', 'quantity', 'location', 'status', 'updated')


# Register the stock models with the admin site
admin.site.register(Warehouse, WarehouseAdmin)
admin.site.register(StockItem, StockItemAdmin)
| {"/InvenTree/track/admin.py": ["/InvenTree/track/models.py"], "/InvenTree/part/serializers.py": ["/InvenTree/part/models.py"], "/InvenTree/project/admin.py": ["/InvenTree/project/models.py"], "/InvenTree/part/admin.py": ["/InvenTree/part/models.py"], "/InvenTree/stock/views.py": ["/InvenTree/stock/models.py"], "/InvenTree/supplier/views.py": ["/InvenTree/supplier/models.py"], "/InvenTree/part/views.py": ["/InvenTree/part/models.py", "/InvenTree/part/serializers.py"], "/InvenTree/supplier/admin.py": ["/InvenTree/supplier/models.py"], "/InvenTree/stock/admin.py": ["/InvenTree/stock/models.py"]} |
62,295 | kpitzen/routegetter | refs/heads/master | /routesparser.py | """RoutesParser
This module will use the requests package to find daily route information
in html form from the cyride website, and use beautifulsoup4 to parse that information
into a usable form.
"""
from os.path import join, abspath, sep
import io
import requests
from bs4 import BeautifulSoup
# Directory where downloaded HTML is stored: <this file's directory>/data/html.
# Bug fix: the original called sep.split(abspath(__file__)) -- splitting the
# one-character separator *by* the path -- which yielded a wrong base directory.
# The path must be split by the separator instead.
DATA_DIR = join(sep.join(abspath(__file__).split(sep)[:-1]), 'data', 'html')
def get_request(url: str, payload: dict=None, request_type: str='get') -> requests.Response:
    '''Perform an HTTP request and return the response object.

    Args:
        url: target URL.
        payload: optional query parameters.
        request_type: 'put' for a PUT request, anything else performs a GET.

    Raises:
        AssertionError: if the server responds with HTTP 404.
    '''
    if request_type == 'put':
        output_request = requests.put(url, params=payload)
    else:
        output_request = requests.get(url, params=payload)
    # Explicit raise instead of 'assert': asserts are stripped when Python
    # runs with -O, silently disabling the check. AssertionError is kept as
    # the exception type for backward compatibility with existing callers.
    if output_request.status_code == 404:
        print('ERROR: request has failed')
        raise AssertionError('request to {} returned 404'.format(url))
    return output_request
def prettify_html_file(html_file: io.TextIOBase) -> BeautifulSoup:
    '''Parse raw HTML into a navigable BeautifulSoup tree.'''
    return BeautifulSoup(html_file, 'html.parser')
class RouteGetter():
    '''This class will download the cyride route information into a textfile for
    beautifulsoup to consume'''

    def __init__(self, url: str, payload: dict, request_type: str='get'):
        # The HTTP request is issued immediately on construction and the
        # response cached on self.request for later export.
        self.payload = payload
        self.url = url
        self.request = get_request(url=self.url, payload=self.payload, request_type=request_type)

    def export_route_data(self):
        '''write all data from request to an html file'''
        # NOTE(review): assumes DATA_DIR already exists on disk -- os.makedirs
        # is never called; confirm data/html is created beforehand.
        data_filename = join(DATA_DIR, 'route_index.html')
        with open(data_filename, 'w') as data_file:
            print(self.request.text, file=data_file, flush=True)
class RouteParser():
    '''this class will consume the exported html file from RouteGetter,
    and parse it into a usable form, writing out a python .bin file'''

    def __init__(self):
        # Reads the file written by RouteGetter.export_route_data() and
        # parses the whole document up front.
        self.data_file = join(DATA_DIR, 'route_index.html')
        with open(self.data_file, 'r') as html_file:
            self.pretty_html = prettify_html_file(html_file)
| {"/unittesting.py": ["/routesparser.py"]} |
62,296 | kpitzen/routegetter | refs/heads/master | /unittesting.py | '''We will test all routegetter methods in this test suite'''
from os.path import join, abspath, sep
import unittest
import logging
import routesparser
from faker import Faker
# Log file for the test run: <this file's directory>/log/testing/testing.log.
# Bug fix: the original called sep.split(abspath(__file__)) -- splitting the
# separator *by* the path -- instead of splitting the path by the separator.
LOG_FILE = join(sep.join(abspath(__file__).split(sep)[:-1]), 'log', 'testing', 'testing.log')
class RoutesGetterTests(unittest.TestCase):
    """Network-based tests for routesparser.RouteGetter / get_request."""

    @classmethod
    def setUpClass(cls):
        cls.log = logging.getLogger('RouteGetterTests')
        cls.log.setLevel(logging.DEBUG)
        # One shared getter: hits the live cyride site once per class
        cls.routegetter = routesparser.RouteGetter(url='http://www.cyride.com/index.aspx'
                                                   , payload={'page': 1212})
        cls.data_generator = Faker()

    def setUp(self):
        # Fresh random (almost certainly unreachable) URL for each test
        self.bad_url = self.data_generator.url()

    def test_cyride_request(self):
        '''we want to test that our request succeeds at cyride'''
        log = self.log.getChild('test_cyride_request')
        request = self.routegetter.request
        self.assertNotEqual(request.status_code, 404)
        log.debug('%s, %s', request.url, request)

    @unittest.expectedFailure
    def test_bad_url(self):
        '''get_request against a bogus URL is expected to raise'''
        log = self.log.getChild('test_bad_url')
        request = routesparser.get_request(self.bad_url)
        self.assertEqual(request.status_code, 404)
        # Bug fix: request.url was being passed as the log *format string*;
        # use lazy %-style arguments like the other tests do.
        log.debug('%s, %s', request.url, request)
class RoutesParserTests(unittest.TestCase):
    """Tests for routesparser.RouteParser; requires a previously exported
    data/html/route_index.html on disk."""

    @classmethod
    def setUpClass(cls):
        cls.log = logging.getLogger('RouteParserTests')
        cls.log.setLevel(logging.DEBUG)
        # Parses the on-disk HTML export once for the whole class
        cls.routeparser = routesparser.RouteParser()

    def test_souped_data(self):
        '''the parsed document should be a non-empty soup with a title'''
        log = self.log.getChild('test_souped_data')
        pretty_html = self.routeparser.pretty_html
        self.assertIsNotNone(self.routeparser.pretty_html)
        log.info(pretty_html.title.string)
if __name__ == '__main__':
    # Overwrite the previous run's log file each time ('w' mode)
    logging.basicConfig(filename=LOG_FILE, filemode='w')
    unittest.main()
| {"/unittesting.py": ["/routesparser.py"]} |
62,297 | encorehu/nlp | refs/heads/master | /setup.py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
from setuptools import setup, find_packages
# Packaging metadata for the `nlp` library.
setup(
    name="nlp",
    version="0.1.0",
    description="python nlp lib, for fun.",
    long_description="python nlp lib, for fun.",
    author="encorehu",
    author_email="huyoo353@126.com",
    url="https://github.com/encorehu/nlp",
    license="MIT",
    keywords=("nlp", "egg"),
    platforms="all",
    packages=find_packages(),
    zip_safe=False,
)
| {"/nlp/extractors/html.py": ["/nlp/utils/html2txt.py"]} |
62,298 | encorehu/nlp | refs/heads/master | /nlp/extractors/base.py | import re
class BaseExtractor(object):
    """Base class for extractors: a slug builder, a between-markers helper,
    and an overridable extract() hook that returns [] by default."""

    def build_valid_filename(self, text):
        """Turn *text* into a dash-separated slug safe for use as a filename.

        Punctuation and whitespace become '-', any run of dashes collapses
        to a single dash, and leading/trailing dashes are stripped.
        """
        dst = text
        for x in '\t\n\':;",.[](){}~!@#$%^&*_+-=/<>?':
            dst = dst.replace(x, ' ')
        dst = dst.replace(' ', '-')
        # BUG FIX: the original chained .replace('--','-') twice, which only
        # collapses short runs (five or more consecutive dashes still leave
        # '--' behind).  A regex collapses a run of any length in one pass.
        dst = re.sub('-+', '-', dst)
        dst = dst.strip('-')
        return dst

    def find_between(self, text, s1, s2=None):
        """Return the substring of *text* strictly between markers *s1* and
        *s2*, or '' when the pair is not found in order.

        Raises Exception when *s1* is falsy (required marker missing).
        """
        if not s1:
            raise Exception('s1 is None!')
        pos1 = text.find(s1)
        if s2 and pos1 != -1:
            pos2 = text.find(s2, pos1)
        else:
            pos2 = -1
        if pos2 != -1 and pos2 > pos1:
            return text[pos1 + len(s1):pos2]
        else:
            return ''

    def _extract(self, html):
        # Subclasses override this with real extraction logic.
        result = []
        return result

    def extract(self, html):
        """Public entry point; delegates to the overridable _extract()."""
        return self._extract(html)
class BaseRegexExtractor(object):
    """Extractor driven by a regular expression.

    Subclasses set the *regex* class attribute; callers may also override
    the pattern per call via the *regex* keyword.
    """

    regex = None  # subclasses set a pattern string here

    def _extract(self, html, regex=None):
        """Return all matches of *regex* (falling back to self.regex) found
        in *html*; [] when no pattern is configured."""
        result = []
        # Idiom fix: test identity with None rather than '== None'.
        if regex is None:
            regex = self.regex
        if regex is None:
            return result
        p = re.compile(regex)
        result = p.findall(html)
        return result

    def extract(self, html, regex=None):
        """Public entry point; delegates to _extract()."""
        return self._extract(html, regex=regex)
| {"/nlp/extractors/html.py": ["/nlp/utils/html2txt.py"]} |
62,299 | encorehu/nlp | refs/heads/master | /nlp/tokenizers.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Encore Hu, <huyoo353@126.com>'
import sys
import os
import time
start = time.time()  # module import timestamp; apparently unused profiling leftover -- TODO confirm before removing
def _tokenize(textline):
#print 'you call me'
for x in textline:
yield x
tokenize = _tokenize
class BaseChineseWordTokenizer(object):
    '''Processes one line-sized phrase, e.g. a line of text or a short
    sentence made of a few characters.  (Python 2 module.)'''
    # NOTE(review): tokens=[] below is a mutable default argument; it is
    # never mutated here, but confirm before refactoring.
    def __init__(self, textline='', *args, **kwargs):
        self.textline = textline
    def __iter__(self):
        # Subclasses must implement how tokens are extracted from the line,
        # via a list or a generator.
        raise NotImplementedError('Please use subclass of BaseChineseWordTokenizer, and Implemented __iter__() method.')
    def __call__(self):
        print 'you call me'
    def process_tokens(self, text, tokens=[]):
        #for i,token in enumerate(tokens):
        #    print i,token
        # If tokens were passed in (i.e. the output of an earlier tokenizer
        # in the chain), refine each of those tokens further.
        if tokens:
            ###print len(tokens)
            t=[]
            for token in tokens:
                if len(token)>1:# skip empty and single-character tokens
                    self.textline = token
                    for x in self: # delegates to this instance's __iter__()
                        t.append(x) #(list(self))
                else:
                    t.append(token)
            return t
        else:
            # No tokens from an earlier tokenizer: this is probably the first
            # tokenizer in the chain, so tokenize the raw text itself.
            ###if not self.textline: # i.e. '' or None
            ###    self.textline = text
            self.textline = text
            return list(self) # list() drives this class's __iter__()
class ChineseWordTokenizer(BaseChineseWordTokenizer):
    """Simplest tokenizer: every character becomes its own token."""
    def __iter__(self):
        for ch in self.textline:
            yield ch
class ChineseWordWhitespaceTokenizer(BaseChineseWordTokenizer):
    """Split the line on runs of whitespace (space, tab, newline)."""
    def __iter__(self):
        for piece in self.textline.split():
            yield piece
class ChineseWordDotTokenizer(BaseChineseWordTokenizer):
    '''Split on dots.  UNFINISHED -- the original author marked it
    "do not use yet".  Intended behaviour:
    pass through decimals like:
        1.2 2.3234
    but split cases like:
        2.
        a2.v
    '''
    #choices = '.'.decode('utf-8')
    #choices = list(choices)
    choices = ['.']
    def __iter__(self):
        # split by dot
        nflag=0 # is the character after the dot a digit?
        tmp=[]
        length = len(self.textline)
        # BUG FIX: the original iterated `for i,x in self.textline`, which
        # raises ValueError on any multi-character line (single characters
        # cannot unpack into two names); enumerate() supplies (index, char).
        for i,x in enumerate(self.textline):
            if x in self.choices:
                # drop the punctuation character and split here
                if i == 0:
                    continue # leading dot: skip straight to the next char
                else:
                    if i+1 < length:
                        pass # TODO: decimal-vs-separator handling was never finished
                    yield ''.join(tmp) # split point: emit the collected piece
                    tmp=[]
            else:
                tmp.append(x)
        if tmp:
            yield ''.join(tmp)
class ChineseWordPunctionTokenizer(BaseChineseWordTokenizer):
    # The decimal point is excluded because it may belong to a number such
    # as 2.3 -- dots should only be stripped after numbers are handled
    # (planned to happen in the decimal tokenizer).
    # Hyphen '-' and underscore '_' are also excluded.
    # NOTE(review): str.decode() exists only in Python 2 -- this module is
    # Python 2 code.
    Punction = ' ,/;\'[]\<>?:"{}|~!@#$%^&*()+=,。、;‘【】、《》?:“{}§·~!@#¥%……&*()'.decode('utf-8')
    def __iter__(self):
        tmp=[]
        for x in self.textline:
            if x in self.Punction:
                # punctuation: drop the character and split here
                yield ''.join(tmp) # split point: emit the collected piece
                tmp=[]
            else:
                tmp.append(x)
        if tmp:
            yield ''.join(tmp)
class ChineseWordBeforeTokenizer(BaseChineseWordTokenizer):
    '''Cut *before* trigger characters.
    E.g. for "这里有2个人", cut just before the "2": [这里有, 2个人].
    '''
    choices = '不把我和比或与非就'.decode('utf-8')
    def __iter__(self):
        nflag=0
        tmp=[]
        for x in self.textline:
            if x in self.choices: # current char is a trigger char
                if nflag == 1: # previous char was a trigger char too: keep appending
                    tmp.append(x)
                else: # previous char was not: cut after it and emit the piece so far
                    # (this does wrongly split words like 统一 / 同一)
                    yield ''.join(tmp) # split point: emit the collected piece
                    tmp=[]
                    tmp.append(x)
                nflag = 1
            else: # current char is not a trigger char
                nflag = 0
                tmp.append(x)
        if tmp:
            yield ''.join(tmp)
class ChineseWordNumericTokenizer(BaseChineseWordTokenizer):
    '''Cut before digits: break just before a number; consecutive numeric
    characters stay together, so the cut lands before the first of them.
    E.g. "这里有2个人" -> [这里有, 2个人].
    '''
    # 第 often combines with numbers, e.g. 第一, 第二, 第3名
    ###numbers = '.0123456789第〇一二三四五六七八九零壹贰叁肆伍陆柒捌玖十百千万亿拾佰仟两元钱块斤辆斗选'.decode('utf-8')
    numbers = '.0123456789第〇一二三四五六七八九零壹贰叁肆伍陆柒捌玖十块元角分厘'.decode('utf-8')
    def __iter__(self):
        nflag=0
        tmp=[]
        for i,x in enumerate(self.textline):
            if x =='.' and i == 0:
                continue # leading '.': skip straight to the second character
            if x in self.numbers: # current char is numeric
                if nflag == 1:
                    # previous char was numeric too: keep appending so that
                    # e.g. 九十八 is not split -- such characters belong together
                    # (the both-neighbours-numeric case)
                    tmp.append(x)
                else: # previous char was not numeric: cut after it, emit the piece
                    # (this does wrongly split words like 统一 / 同一)
                    yield ''.join(tmp) # split point: emit the collected piece
                    tmp=[]
                    tmp.append(x)
                nflag = 1
            else: # current char is not numeric
                nflag = 0
                tmp.append(x)
        if tmp:
            yield ''.join(tmp)
class ChineseWordAfterTokenizer(BaseChineseWordTokenizer):
    '''Cut *after* trigger characters.
    Using 的 as an example: "这里有个开心的人" is cut after 开心的,
    giving [这里有个开心的, 人].
    Caveat: this wrongly splits words such as 的确 and 的的确确.
    '''
    choices = list('于是的都有个性化了'.decode('utf-8'))
    def __iter__(self):
        nflag=0
        tmp=[]
        for x in self.textline:
            if x in self.choices: # current char is a trigger char
                if nflag == 1: # previous char was a trigger char too
                    yield ''.join(tmp) # split point: emit the collected piece
                    tmp=[]
                    tmp.append(x)
                else: # previous char was not a trigger char
                    tmp.append(x)
                    yield ''.join(tmp) # split point: emit the collected piece
                    tmp=[]
                nflag = 1
            else: # current char is not a trigger char
                nflag = 0
                tmp.append(x)
        if tmp:
            yield ''.join(tmp)
class ChineseWordDeTokenizer(BaseChineseWordTokenizer):
    '''Cut *after* trigger characters (near-duplicate of
    ChineseWordAfterTokenizer; only the trigger set differs -- it adds 吧).
    Using 的 as an example: "这里有个开心的人" is cut after 开心的,
    giving [这里有个开心的, 人].
    Caveat: this wrongly splits words such as 的确 and 的的确确.
    '''
    choices = list('于是的都有个性化了吧'.decode('utf-8'))
    def __iter__(self):
        nflag=0
        tmp=[]
        for x in self.textline:
            if x in self.choices: # current char is a trigger char
                if nflag == 1: # previous char was a trigger char too
                    yield ''.join(tmp) # split point: emit the collected piece
                    tmp=[]
                    tmp.append(x)
                else: # previous char was not a trigger char
                    tmp.append(x)
                    yield ''.join(tmp) # split point: emit the collected piece
                    tmp=[]
                nflag = 1
            else: # current char is not a trigger char
                nflag = 0
                tmp.append(x)
        if tmp:
            yield ''.join(tmp)
class ChineseWordFamilyNameTokenizer(BaseChineseWordTokenizer):
    '''Cut *before* common Chinese family names (same cut-before scheme as
    ChineseWordBeforeTokenizer, different trigger set).
    '''
    choices = '王陈李张刘'.decode('utf-8')
    def __iter__(self):
        nflag=0
        tmp=[]
        for x in self.textline:
            if x in self.choices: # current char is a family-name char
                if nflag == 1: # previous char was one too: keep appending
                    tmp.append(x)
                else: # previous char was not: cut after it and emit the piece
                    # (this does wrongly split words like 统一 / 同一)
                    yield ''.join(tmp) # split point: emit the collected piece
                    tmp=[]
                    tmp.append(x)
                nflag = 1
            else: # current char is not a family-name char
                nflag = 0
                tmp.append(x)
        if tmp:
            yield ''.join(tmp)
# Configure the tokenizers to use here; the text is run through them in
# order: long pieces are split into shorter ones, and the shorter pieces are
# split again until no tokenizer can do anything more.
TOKENIZER_CLASSES=[
    ChineseWordWhitespaceTokenizer, # first split on whitespace (space/tab/newline)
    ###ChineseWordNumericTokenizer, # (earlier experiment: split numbers first)
    ChineseWordPunctionTokenizer, # then split on punctuation
    ChineseWordNumericTokenizer, # then cut numbers off at their front
    ChineseWordDeTokenizer, # then cut after 的
    #ChineseWordBeforeTokenizer, # then cut before chars like 与或非, 我你他
    #ChineseWordFamilyNameTokenizer, # then cut before family names
    ]
class BaseHandler(object):
    """Runs the configured tokenizer chain (TOKENIZER_CLASSES) over a text."""
    def __init__(self):
        self._tokenizers = None
        self._load_tokenizer()
    def _load_tokenizer(self, tobeloadedclasses = TOKENIZER_CLASSES):
        # Instantiate each tokenizer class and collect its bound
        # process_tokens method; classes that fail to construct are skipped.
        tokenizers = []
        for tn_class in tobeloadedclasses:
            try:
                tn_instance = tn_class() # instantiate the tokenizer
            except: # exceptions.MiddlewareNotUsed:
                print '%s init error' % tn_class.__name__
                continue
            if hasattr(tn_instance, 'process_tokens'):
                tokenizers.append(tn_instance.process_tokens)
        self._tokenizers = tokenizers
    def get_tokens(self, text):
        ###print len(self._tokenizers)
        # Feed the text through every tokenizer in order; each stage refines
        # the token list produced by the previous one.
        tokens = []
        for tokenizer_method in self._tokenizers:
            #print tokenizer_method
            #print '-'*80
            tokens = tokenizer_method(text, tokens)
            #print ('/ '.join(tokens))
        ###for token in tokens:
        ###    if token:
        ###        yield token
        return tokens
if __name__ == '__main__':
    # Demo: tokenize a Chinese paragraph (about Flappy Bird) and print a
    # word-frequency summary.  (Python 2 only: print statements, str.decode.)
    text ='''《疯狂的小鸟》是由来自越南的独立游戏开发者Dong Nguyen所开发的作品,游戏中玩家必须控制一只小鸟,跨越由各种不同长度水管所组成的障碍,而这只鸟其实是根本不会飞的……所以玩家每点击一下小鸟就会飞高一点,不点击就会下降,玩家必须控制节奏,拿捏点击屏幕的时间点,让小鸟能在落下的瞬间跳起来,恰好能够通过狭窄的水管缝隙,只要稍一分神,马上就会失败阵亡'''
    text = text.decode('utf-8')
    bh=BaseHandler()
    tokenize = bh.get_tokens
    r={}
    words_list = tokenize(text)
    print text
    print '-'*80
    print '/ '.join( words_list)
    # Count occurrences of each token.
    for word in words_list:
        r[word]=r.get(word,0)+1
    length =len(r)  # NOTE(review): assigned but unused
    print u'词语总数:',len(r)
62,300 | encorehu/nlp | refs/heads/master | /nlp/utils/html2txt.py | # -*- coding: utf-8 -*-
import urllib2,httplib
import cookielib
import socket
import re
socket.setdefaulttimeout(40)  # import-time side effect: 40 s default timeout for every socket in the process
def gethtml(url, encoding=None,ref=None):
    """Fetch *url* (with cookie support and a browser-like User-Agent) and
    return the body, optionally decoded with *encoding*.

    On any network error the returned value is an 'ERROR! ...' message
    string instead of an exception being raised.  Python 2 only (urllib2,
    'except X,e' syntax).
    """
    cj = cookielib.CookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    req = urllib2.Request(url)
    req.add_header('User-Agent', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)')
    if ref != None and ref.startswith('http'):
        req.add_header('Referer', ref)
    else:
        # default Referer: the scheme://host of the requested URL itself
        req.add_header('Referer', '://'.join(urllib2.urlparse.urlparse(url)[:2]))
    #print 'Connecting %s ...' % url
    try:
        resp = opener.open(req)
        #print dir(resp)
        #print resp.headers
        #print resp.code
        #print resp.info()
        #print resp.msg
        if resp.url != url:
            print 'Connect jump to %s' % resp.url
        htmlcode= resp.read()
        resp.close()
    except urllib2.URLError,e:
        htmlcode = 'ERROR! Connect to %s FAILED ' % url+str(e)
        print htmlcode
    # NOTE(review): HTTPError subclasses URLError, so the branch below is
    # unreachable -- the URLError clause above always catches it first.
    except urllib2.HTTPError,e:
        htmlcode = 'ERROR! Connect to %s FAILED ' % url+str(e)
        print htmlcode
    except socket.error,e:
        htmlcode = 'ERROR! Connect to %s FAILED ' % url+str(e)
        print htmlcode
    except socket.timeout,e:
        htmlcode = 'ERROR! Connect to %s FAILED ' % url+str(e)
        print htmlcode
    except httplib.BadStatusLine,e:
        htmlcode = 'ERROR! Connect to %s FAILED ' % url+str(e)
        print htmlcode
    except httplib.IncompleteRead,e:
        htmlcode = 'ERROR! Connect to %s FAILED ' % url+str(e)
        print htmlcode
    finally:
        try:
            resp.close()
        except UnboundLocalError:
            # resp was never assigned because open() itself failed
            pass
    if encoding != None:
        try:
            lll = htmlcode.decode(encoding)
        except UnicodeDecodeError:
            print 'UnicodeDecodeError'
            lll = htmlcode
    else:
        lll = htmlcode
    return lll
def gethtmlbyproxy(proxy,url,encoding='gb18030',ref=None):
    """Fetch *url* through the given HTTP *proxy* ('host:port') and return
    the body decoded with *encoding* (falls back to raw bytes on decode
    failure).  Returns '' on network errors or when the server redirects.

    NOTE(review): install_opener() mutates global urllib2 state -- every
    later urlopen() in the process keeps using this proxy.  Python 2 only.
    """
    # work
    #print 'by:', proxy
    proxyserver = 'http://%s' % proxy
    opener = urllib2.build_opener( urllib2.ProxyHandler({'http':proxyserver}) )
    urllib2.install_opener( opener )
    req = urllib2.Request(url)
    req.add_header('User-Agent', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)')
    if ref != None and ref.startswith('http'):
        req.add_header('Referer', ref)
    else:
        # default Referer: the scheme://host of the requested URL itself
        req.add_header('Referer', '://'.join(urllib2.urlparse.urlparse(url)[:2]))
    #print 'Connecting %s ...' % url
    try:
        resp = urllib2.urlopen(req)
        if resp.url != url:
            print 'Connect jump to %s' % resp.url
            htmlcode=''
        else:
            htmlcode= resp.read()
        resp.close()
    except urllib2.URLError,e:
        print 'Connect to %s FAILED' % url,e
        htmlcode=''
    # NOTE(review): HTTPError subclasses URLError; this branch is unreachable.
    except urllib2.HTTPError,e:
        print 'Connect to %s FAILED' % url,e
        htmlcode=''
    except socket.error,e:
        print 'Connect to %s FAILED' % url,e
        htmlcode=''
    except socket.timeout,e:
        print 'Connect to %s FAILED' % url,e
        htmlcode=''
    except httplib.BadStatusLine,e:
        print 'Connect to %s FAILED' % url,e
        htmlcode=''
    except httplib.IncompleteRead,e:
        print 'Connect to %s FAILED' % url,e
        htmlcode=''
    try:
        result = htmlcode.decode(encoding)
    except UnicodeDecodeError:
        result = htmlcode
    return result
def html2txt(lll):
    """Strip tags from HTML string *lll* and return the visible text.

    Hand-rolled streaming state machine: scans character by character,
    tracking whether the cursor sits inside a tag, a <script> block or a
    <style> block, and keeps only text between '>' and '<'.  <br> and <p>
    become newlines; a few entity replacements run at the end.

    NOTE(review): the `log` list accumulates every input character but is
    never read -- memory overhead only; confirm before removing.
    """
    if len(lll)==0:
        return ''
    l1=len(lll)
    data=[]
    piece='' # the fragment between < and >
    # global judgement of where the current character x sits
    get_into_script_tag = False # inside a <script> tag?
    get_into_style_tag = False # inside a <style> tag?
    text_flag = True # data flag
    start_tag = False # <
    end_tag = False # >
    script_start = False
    script_end = False
    style_start = False
    style_end = False
    log=[]
    # probe character by character -- call it streaming detection, a string flow
    for x in lll:
        log.append(x)
        if x == '<':
            start_tag = True # entering a tag
            end_tag = False # not an end marker
            text_flag = False # the next x is not data
            piece='' # '<' starts a new piece; piece records what sits between < and > (not the brackets themselves)
            #log.append("tag started----")
            #print x,text_flag,repr(x)
            continue # (1) entered a tag; move on to the next character
        elif x == '>':
            text_flag = True # the next x may be data
            #print piece
            # '>' means the piece is complete -- this, not later, is the right place to inspect it
            if piece.lower().startswith('script'):
                get_into_script_tag = True # found a script tag
                script_start = True # the script block has started
                script_end = False # and has not ended (no </script> yet)
                text_flag = False # nothing inside a script tag counts as data
                #print 'entering script block'
            elif piece.lower() =='/script':
                get_into_script_tag = False # about to leave the script tag
                script_start = False # ditto
                script_end = True # ditto
                text_flag = True # may be entering a data region
                #print 'leaving script block'
            elif piece.lower().startswith('style'):
                get_into_style_tag = True
                style_start = True
                style_end = False
                text_flag = False
                #print 'entering style block'
            elif piece.lower() =='/style':
                get_into_style_tag = False
                style_start = False
                style_end = True
                text_flag = True
                #print 'leaving style block'
            elif piece.lower() =='br /' or piece.lower() =='br ' or piece.lower() =='br' or piece.lower() == 'p' or piece.lower() == '/p':
                #print piece*5
                data.append('\n')
            if get_into_script_tag: # if the previous position was inside a script tag
                text_flag = False # script content is not data
            if get_into_style_tag:
                text_flag = False
            start_tag = False # the tag is closed now
            end_tag = True
            #print 'piece:','-'*10,piece
            #log.append("----tag ended\n")
            #print x,text_flag,repr(x)
            continue
        else: #(2) inside a tag, or inside data
            #piece+=x
            #print 'piece:',piece
            # decide text_flag from the piece collected so far
            #print "may or may not be data"
            # piece may be an incomplete fragment such as scr, sc, h, ta, table
            get_into_script_tag = False
            get_into_style_tag = False
            #text_flag = False
            if start_tag: # a '<' came earlier: x sits between < and >, so record it in piece, not data
                #print 'text flag',text_flag
                piece+=x
            if end_tag: # a '>' came earlier: we are in the data region
                # piece no longer records anything here
                if script_start: # inside a script body -- not data, skip
                    text_flag = False
                if style_start: # inside a style body -- not data, skip
                    text_flag = False
                if text_flag: # True after '>', False after '<': only >...< content is data
                    data.append(x)
    # NOTE(review): the replace() calls below look like HTML-entity
    # unescaping ('&nbsp;', '&lt;', ...) whose entity literals were lost to
    # encoding corruption; as written several are no-ops.  Restore the
    # entities after confirming against the original repository.
    aaa=''.join(data).strip() \
        .replace(' ',' ') \
        .replace('<','<')\
        .replace('>','>')\
        .replace('"','"') \
        .replace('&','&') \
        .replace('\r','\n')
    l2=len(aaa)
    #print repr(aaa)
    #print aaa
    #print "original length %d, cleaned %d, ratio %0.2f%%" % (l1,l2, 100.00 * l2/l1)
    return aaa
def buildnewurl(org_url, newpath):
    """Resolve *newpath* against *org_url* and return an absolute URL.

    Already-absolute http/https paths and paths that begin with the base
    URL's scheme are returned untouched.
    """
    if newpath.startswith(('http://', 'https://')):
        return newpath
    scheme = urllib2.urlparse.urlparse(org_url)[0]
    if newpath.startswith(scheme):
        return newpath
    return urllib2.urlparse.urljoin(org_url, newpath)
def html2markdown(html, baseurl=None):
    """Convert an HTML string to rough Markdown.

    Same streaming state machine as html2txt, extended with tag handlers:
    <br>/<p> -> newlines, <pre> -> indented blocks, <a href> -> [text](url)
    (resolved against *baseurl* when it is an http URL), <li> -> ' - ',
    <em>/<strong> -> */**, <h1>-<h4> -> #'s.  Other markup is dropped.

    NOTE(review): `errors` collects every input character plus a summary
    line but is never returned -- apparently debug residue.
    """
    def cut_tail_whitespaces(txt):
        # strip trailing whitespace from every line
        line_list=txt.split('\n')
        result=[]
        for x in line_list:
            result.append(x.rstrip())
        return '\n'.join(result)
    if len(html)==0:
        return ''
    l1=len(html)
    data=[]
    piece='' # the fragment between < and >
    href_patten='href\s?=\s?[\'\"]?([#\w\-\:_/\.\&]+)[\'\"]?\s?'
    hp=re.compile(href_patten)
    url_href=''
    url_title=''
    url_text=''
    url_flag=False
    # global judgement of where the current character x sits
    get_into_script_tag = False # inside a <script> tag?
    get_into_style_tag = False # inside a <style> tag?
    text_flag = True # data flag
    start_tag = False # <
    end_tag = False # >
    script_start = False
    script_end = False
    style_start = False
    style_end = False
    pre_flag = False
    errors=[]
    # probe character by character -- streaming detection, a string flow
    i=-1
    for x in html:
        i=i+1
        errors.append(x)
        if x == '<':
            start_tag = True # entering a tag
            end_tag = False # not an end marker
            text_flag = False # the next x is not data
            piece='' # '<' starts a new piece; piece records what sits between < and >
            #errors.append("tag started----")
            #print x,text_flag,repr(x)
            continue # (1) entered a tag; move on to the next character
        elif x == '>':
            text_flag = True # the next x may be data
            #print piece
            ###################################
            #try:
            #    piece=piece.strip().split()[0] # extract e.g. 'pre' from 'pre class="prettyprint lang-py"'
            #except:
            #    # piece = '' # i.e. '<>' with nothing inside; skip the tag checks below
            #    errors.append(', '.join([ x,str(i),'piece',repr(piece)]))
            #    continue
            ###################################
            # '>' means the piece is complete -- this is the right place to inspect it
            if piece.lower().startswith('script'):
                get_into_script_tag = True # found a script tag
                script_start = True # the script block has started
                script_end = False # and has not ended (no </script> yet)
                text_flag = False # nothing inside a script tag counts as data
                #print 'entering script block'
            elif piece.lower() =='/script':
                get_into_script_tag = False # about to leave the script tag
                script_start = False # ditto
                script_end = True # ditto
                text_flag = True # may be entering a data region
                #print 'leaving script block'
            elif piece.lower().startswith('style'):
                get_into_style_tag = True
                style_start = True
                style_end = False
                text_flag = False
                #print 'entering style block'
            elif piece.lower() =='/style':
                get_into_style_tag = False
                style_start = False
                style_end = True
                text_flag = True
                #print 'leaving style block'
            elif piece.lower() =='br /' or piece.lower() =='br ' or piece.lower() =='br' or piece.lower() == 'p' or piece.lower() == '/p':
                #print piece*5
                if pre_flag:
                    # NOTE(review): markdown code blocks need a 4-space
                    # indent; this literal may have lost spaces in transit.
                    data.append('\n ')
                else:
                    data.append('\n')
            elif piece.lower().startswith('pre'):
                #print piece*5
                data.append('\n ')
                pre_flag = True
            elif piece.lower() == '/pre':
                #print piece*5
                data.append('\n ')
                pre_flag = False
            elif piece.lower().startswith('a '):
                hrefs = hp.findall(piece)
                if len(hrefs)==1:
                    # absolutize only when a usable http base URL was given
                    if baseurl!=None:
                        if baseurl.startswith('http'):
                            url_href=buildnewurl(baseurl,hrefs[0])
                        else:
                            url_href= hrefs[0]
                    else:
                        url_href= hrefs[0]
                #print piece*5
                url_flag = True
                data.append('[')
            elif piece.lower() == '/a':
                #print piece*5
                if url_href:
                    data.append('](%s)' % url_href)
                else:
                    data.append(']')
                url_flag = False
                url_text = ''
                url_href = ''
            elif piece.lower() == 'li':
                #print piece*5
                if pre_flag:
                    pass
                else:
                    data.append(' - ')
            elif piece.lower() == '/li':
                #print piece*5
                if pre_flag:
                    data.append('\n ')
            elif piece.lower() == 'em':
                data.append('*')
            elif piece.lower() == '/em':
                data.append('*')
            elif piece.lower() == 'strong':
                data.append('**')
            elif piece.lower() == '/strong':
                data.append('**')
            elif piece.lower() == 'h1':
                data.append('\n# ')
            elif piece.lower() == '/h1':
                data.append(' #\n')
            elif piece.lower() == 'h2':
                data.append('\n## ')
            elif piece.lower() == '/h2':
                data.append(' ##\n')
            elif piece.lower() == 'h3':
                data.append('\n### ')
            elif piece.lower() == '/h3':
                data.append(' ###\n')
            elif piece.lower() == 'h4':
                data.append('\n#### ')
            elif piece.lower() == '/h4':
                data.append(' ####\n')
            if get_into_script_tag: # if the previous position was inside a script tag
                text_flag = False # script content is not data
            if get_into_style_tag:
                text_flag = False
            start_tag = False # the tag is closed now
            end_tag = True
            #print 'piece:','-'*10,piece
            #errors.append("----tag ended\n")
            #print x,text_flag,repr(x)
            continue
        else: #(2) inside a tag, or inside data
            #piece+=x
            #print 'piece:',piece
            # decide text_flag from the piece collected so far
            #print "may or may not be data"
            # piece may be an incomplete fragment such as scr, sc, h, ta, table
            get_into_script_tag = False
            get_into_style_tag = False
            #text_flag = False
            if start_tag: # a '<' came earlier: x sits between < and >, so record it in piece, not data
                #print 'text flag',text_flag
                piece+=x
            if end_tag: # a '>' came earlier: we are in the data region
                # piece no longer records anything here
                if script_start: # inside a script body -- not data, skip
                    text_flag = False
                if style_start: # inside a style body -- not data, skip
                    text_flag = False
                if text_flag: # True after '>', False after '<': only >...< content is data
                    if pre_flag==True and x == '\n':
                        data.append('%s ' % x)
                    else:
                        data.append(x)
    # NOTE(review): the replace() calls below look like HTML-entity
    # unescaping ('&nbsp;', '&lt;', '&lsquo;', ...) whose entity literals
    # were lost to encoding corruption; several are no-ops as written.
    aaa=''.join(data).strip() \
        .replace(' ',' ') \
        .replace('<','<')\
        .replace('>','>')\
        .replace('"','"') \
        .replace('&','&') \
        .replace('‘',u'‘') \
        .replace('’',u'’') \
        .replace('“',u'“') \
        .replace('”',u'”') \
        .replace('…',u'…') \
        .replace('—',u'—') \
        .replace('\r','\n') \
        .replace('\n\n\n','\n\n') \
        .replace('\n\n\n','\n\n')
    l2=len(aaa)
    errors.append( "原网页长度 %d ,清理后 %d, 压缩比%0.2f%%" % (l1,l2, 100.00 * l2/l1))
    return cut_tail_whitespaces(aaa)
| {"/nlp/extractors/html.py": ["/nlp/utils/html2txt.py"]} |
62,301 | encorehu/nlp | refs/heads/master | /nlp/extractors/html.py | # -*- coding: utf-8 -*-
import re
from base import BaseExtractor, BaseRegexExtractor
from nlp.utils.html2txt import html2markdown
class LinkExtractor(BaseRegexExtractor):
    """Extract (href, link text) pairs from anchor tags in HTML."""
    regex = '<\s*[Aa]{1}\s+[^>]*?[Hh][Rr][Ee][Ff]\s*=\s*[\"\']?([/:_;=\w\&\?\%\+\-\.\(\)]+)[\"\']?\s*.*?>(.*?)</[Aa]{1}>'
    def extract(self, html):
        # Plain delegation to the regex-based base implementation.
        return super(LinkExtractor, self).extract(html)
class MarkdownExtractor(BaseExtractor):
    """Convert HTML to Markdown by delegating to utils.html2txt.html2markdown."""
    # NOTE(review): class-level mutable list shared by every instance; it is
    # never touched in this file -- confirm external use before relying on it.
    errors=[]
    def extract(self, html, baseurl=None):
        # *baseurl* is used to absolutize relative links in anchors.
        return html2markdown(html, baseurl=baseurl)
| {"/nlp/extractors/html.py": ["/nlp/utils/html2txt.py"]} |
62,309 | remodoy/reauth-python | refs/heads/master | /reauth/__init__.py | from .reauth import get_public_key, fetch_reauth_token, decode_reauth_token
__all__ = [
'get_public_key'
'fetch_reauth_token'
'decode_reauth_token'
]
| {"/reauth/__init__.py": ["/reauth/reauth.py"]} |
62,310 | remodoy/reauth-python | refs/heads/master | /setup.py | #!/usr/bin/env python
from distutils.core import setup
# Packaging metadata for the reauth client library.
setup(
    name='reauth',
    version='0.1',
    description='Library for ReAuth authentication',
    keywords='reauth authentication',
    author='Remod Oy',
    author_email='reauth@remod.fi',
    url='https://github.com/remodoy/reauth-python',
    license="MIT",
    packages=['reauth'],
    package_dir={'reauth': 'reauth'},
    requires=["python_jwt (>=2.0.0)", "PyCrypto"],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
)
| {"/reauth/__init__.py": ["/reauth/reauth.py"]} |
62,311 | remodoy/reauth-python | refs/heads/master | /reauth/reauth.py | import json
import ssl
import time
import urllib.request
from datetime import timedelta
import logging
import Crypto.PublicKey.RSA
import python_jwt as jwt
_accepted_sign_algs = ["PS512"]  # only PS512-signed tokens are accepted by decode_reauth_token
_pubkey_cache_living_time = 60*10  # cache the server public key for 10 min
_pubkey_cache_exp_time = 0  # epoch seconds at which the cached key expires
_pubkey_cache = ""  # last fetched public key (text); NOTE(review): updates are not thread-safe
_iat_skew = timedelta(minutes=5)  # allowed clock skew for the token's iat claim
logger = logging.getLogger("reauth")
def get_public_key(reauth_url, verify=True):
    """
    Get ReAuth server public key from server.

    The key is cached in module globals for ``_pubkey_cache_living_time``
    seconds to avoid hammering the server.  It's recommended in a production
    setup to store the public key locally, for example in configuration.

    :param reauth_url: ReAuth server base url. E.g. https://reauth.example.com
    :param verify: Verify TLS, default value is True
    :return: Public key in text format
    """
    # Log format fixed: the original eager string was missing ')' and used
    # positional concatenation; lazy %-args avoid formatting when disabled.
    logger.debug("get_public_key(%s, verify=%s)", reauth_url, verify)
    global _pubkey_cache_exp_time, _pubkey_cache, _pubkey_cache_living_time
    if time.time() < _pubkey_cache_exp_time:
        public_key = _pubkey_cache
    else:
        ctx = ssl.create_default_context()
        if not verify:
            # BUG FIX: the original only set ctx.check_hostname = verify.
            # Disabling hostname checking alone still verifies the
            # certificate chain, so verify=False did not actually skip TLS
            # verification.  CERT_NONE is required (check_hostname must be
            # disabled first).
            ctx.check_hostname = False
            ctx.verify_mode = ssl.CERT_NONE
        with urllib.request.urlopen(reauth_url + "/key.pub", timeout=15, context=ctx) as f:
            public_key = f.read()
        # NOTE(review): cache update is not thread-safe -- confirm
        # single-threaded use.
        _pubkey_cache = public_key
        _pubkey_cache_exp_time = time.time() + _pubkey_cache_living_time
    return public_key
def fetch_reauth_token(code, reauth_url, verify=True):
    """
    Fetch ReAuth token from ReAuth server using code passed in redirect.

    :param code: Code
    :param reauth_url: ReAuth server base url. E.g. https://reauth.example.com
    :param verify: Verify TLS, default value is True
    :return: Token in text format, or None when the response carries no token
    """
    # Log format fixed (missing ')' in the original) and made lazy.
    logger.debug("fetch_reauth_token(%s, %s, verify=%s)", code, reauth_url, verify)
    ctx = ssl.create_default_context()
    if not verify:
        # BUG FIX: see get_public_key -- CERT_NONE is needed to actually
        # disable TLS verification, not just check_hostname.
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
    with urllib.request.urlopen(reauth_url.rstrip("/") + "/api/v1/token/" + code + "/", timeout=15, context=ctx) as f:
        data = json.loads(f.read().decode("utf-8"))
    if 'jwtToken' in data:
        return data['jwtToken']
    return None
def decode_reauth_token(token, public_key):
    """
    Decode and verify ReAuth token

    :param token: Token in text format
    :param public_key: Server public key.
    :return: Dictionary containing Claims from token
    """
    logger.debug("decode_reauth_token(%s, %s)" % (token, public_key))
    # Parse the PEM text into an RSA key object, then verify signature,
    # algorithm and issued-at skew in one step.
    rsa_key = Crypto.PublicKey.RSA.importKey(public_key)
    _header, claims = jwt.verify_jwt(token, pub_key=rsa_key,
                                     allowed_algs=_accepted_sign_algs,
                                     iat_skew=_iat_skew)
    return claims
| {"/reauth/__init__.py": ["/reauth/reauth.py"]} |
62,313 | danielamonteiro/intention-tester | refs/heads/master | /skills/skill_selector.py | import json
import os
def get_skill_credentials():
    """Prompt the user to pick a skill and return its credential tuple
    (version, id, username, password, url); returns None (implicitly, after
    printing a warning) when the chosen skill's data is missing/incomplete."""
    skill_list = get_skills_list()
    skill_to_use = choose_skill(skill_list)
    actual_path = os.path.abspath(os.path.dirname(__file__))
    # os.path.join instead of hard-coded '/' for portability.
    service_credentias_file = os.path.join(actual_path, "service_credentials.json")
    # 'with' closes the handle; the original also called close() redundantly
    # inside the with-block.
    with open(service_credentias_file) as json_file:
        skill_list_file = json.load(json_file)['skills']
    # First entry whose name matches the chosen skill, else None.
    skill_credentials = next((skill for skill in skill_list_file if skill['name'] == skill_to_use), None)
    try:
        skill_version = skill_credentials['skill_version']
        skill_id = skill_credentials['skill_id']
        skill_username = skill_credentials['skill_username']
        skill_password = skill_credentials['skill_password']
        skill_url = skill_credentials['skill_url']
        return skill_version, skill_id, skill_username, skill_password, skill_url
    except (TypeError, KeyError):
        # TypeError: no matching skill (skill_credentials is None);
        # KeyError: a credential field is missing.  The original's bare
        # except also masked unrelated bugs.
        print("[ATENÇÃO] Erro ao tentar pegar as credenciais do skill selecionado. Verifique se as informações estão preenchidas corretamente.")
def get_skills_list():
    """Read service_credentials.json (next to this module) and return the
    list of skill names; returns None (after printing a warning) when the
    file is missing, malformed or of the wrong shape."""
    try:
        actual_path = os.path.abspath(os.path.dirname(__file__))
        # os.path.join instead of hard-coded '/' for portability.
        service_credentias_file = os.path.join(actual_path, "service_credentials.json")
        with open(service_credentias_file) as json_file:
            skill_list_file = json.load(json_file)
        # One name per configured skill, in file order.
        skill_list = [skill['name'] for skill in skill_list_file['skills']]
        return skill_list
    except (OSError, ValueError, KeyError, TypeError):
        # OSError: missing/unreadable file; ValueError: invalid JSON;
        # Key/TypeError: unexpected document shape.  The original's bare
        # except also masked unrelated bugs.
        print("[ATENÇÃO] Não foi possível carregar a lista de skills. Por favor, verifique se o arquivo service_credentials.json está preenchido corretamente.")
def choose_skill(skill_list):
    """Interactively prompt until the user picks a valid skill by number;
    return the chosen skill's name."""
    while True:
        try:
            print("Escolha o skill que você deseja usar (escolha o número): ")
            # enumerate() replaces the original O(n^2) skill_list.index()
            # lookup inside the loop.
            for index, skill in enumerate(skill_list, start=1):
                print(f"{index} - {skill}")
            skill_input = int(input("Skill: "))
            chosen_index = skill_input - 1
            if chosen_index not in range(len(skill_list)):
                print("[ATENÇÃO] Skill selecionado inválido, escolha outro.\n")
            else:
                chosen_skill = skill_list[chosen_index]
                print("Skill selecionado:", chosen_skill)
                return chosen_skill
        except ValueError:
            # int() failed on non-numeric input.  The original's bare except
            # also swallowed KeyboardInterrupt and looped forever when
            # skill_list was None; those now propagate (fail fast).
            print("[ATENÇÃO] Erro ao selecionar o skill. Tente novamente.\n")
62,314 | danielamonteiro/intention-tester | refs/heads/master | /run.py | import os
from files.manipulate_files import get_test_files, create_file, edit_file
from nlp_services.watson import get_watson_response, check_intent
def generate_results():
    """Run every test utterance through Watson and write the results to a
    freshly created spreadsheet; Ctrl-C and failures are reported on stdout."""
    try:
        result_file = create_file()
        result_list = []
        # Each test-file entry is a [utterance, expected_intent] pair.
        for text, expected in get_test_files():
            print("Testando Utterance:", text)
            response = get_watson_response(text)
            result_list.append(check_intent(response, expected))
        edit_file(result_file, result_list)
        print(f"Testes finalizados! ;)\nVerifique os resultados em 'files/result_files/{result_file}.xlsx'")
    except KeyboardInterrupt:
        print("[ATENÇÃO] Aplicação parada pelo usuário")
    except Exception as e:
        print("[ERRO]", e)
if __name__ == "__main__":
    # Script entry point: run the full utterance test suite.
    generate_results()
| {"/run.py": ["/files/manipulate_files.py", "/nlp_services/watson.py"], "/nlp_services/watson.py": ["/skills/skill_selector.py"]} |
62,315 | danielamonteiro/intention-tester | refs/heads/master | /files/manipulate_files.py | import os.path
import os
from os import path
import openpyxl
from openpyxl import utils
from openpyxl.styles import Font
from openpyxl.chart import PieChart, Reference
def create_file():
    """Prompt for a workbook name (re-prompting while it already exists),
    create files/result_files/<name>.xlsx with a 'Resultados' sheet, and
    return the chosen name (even if the save failed -- a warning is printed)."""
    name_file = input("Insira o nome do arquivo que será gerado com os resultados dos testes: ")
    actual_path = os.path.abspath(os.path.dirname(__file__))
    result_files_path = os.path.join(actual_path, f"../files/result_files/{name_file}.xlsx")
    # Idiom fix: truthiness instead of '== True'.
    while path.exists(result_files_path):
        name_file = input(f"O arquivo '{name_file}' já existe. Escolha outro nome: ")
        result_files_path = os.path.join(actual_path, f"../files/result_files/{name_file}.xlsx")
    try:
        wb = openpyxl.Workbook()
        ws = wb.active
        ws.title = "Resultados"
        wb.save(result_files_path)
        print(f"Arquivo '{name_file}.xlsx' criado com sucesso!")
    except Exception:
        # Narrowed from bare except, which also swallowed KeyboardInterrupt.
        print(f"[ATENÇÃO] Erro ao criar o arquivo '{name_file}.xlsx'")
    return name_file
def edit_file(name_file, result_list):
    """Append a header row plus one row per test result to the workbook
    created by create_file(), enable an auto-filter over the data, then
    build the summary chart sheet."""
    actual_path = os.path.abspath(os.path.dirname(__file__))
    result_file_path = os.path.join(actual_path, f"../files/result_files/{name_file}.xlsx")
    try:
        wb = openpyxl.load_workbook(result_file_path)
        ws = wb.active
        headers = ["Utterance","Intenção Esperada","Intenção Retornada","Confiança","Resultado"]
        ws.append(headers)
        # NOTE(review): bolding via row_dimensions[1].font -- confirm the
        # target spreadsheet viewer actually renders this.
        row = ws.row_dimensions[1]
        row.font = Font(bold=True)
        for result in result_list:
            ws.append(result)
        # BUG FIX: the filter range must span the header row *plus* all
        # len(result_list) data rows; the original stopped one row short and
        # excluded the last result from the filter.
        ws.auto_filter.ref = f"A1:E{len(result_list) + 1}"
        ws_result = utils.quote_sheetname(ws.title).replace("'", "")
        wb.save(result_file_path)
        create_chart_file(name_file, ws_result)
    except Exception as e:
        print("[ERRO]", e)
def get_test_files():
    """Collect [utterance, expected_intent] pairs from every .txt file in
    files/test_files/; the file name (minus .txt) is the expected intent.
    Returns None (after printing the error) when the directory is missing."""
    utterances_list = []
    actual_path = os.path.abspath(os.path.dirname(__file__))
    test_files_path = os.path.join(actual_path, f"../files/test_files/")
    try:
        for test_files in os.listdir(test_files_path):
            if test_files.endswith(".txt"):
                expected_intent = test_files[:-4]
                # 'with' guarantees the handle closes even if reading fails
                # (the original leaked it on error).
                with open(f"{test_files_path}/{test_files}", "r") as actual_file:
                    file_to_test = actual_file.read().splitlines()
                for utterance in file_to_test:
                    utterances_list.append([utterance, expected_intent])
        return utterances_list
    except Exception as e:
        print("[ERRO]", e)
def create_chart_file(name_file, ws_result):
    """Add a 'Gráfico' sheet with Sucess/Failed COUNTIF totals over the
    results sheet and a pie chart of the two counts."""
    actual_path = os.path.abspath(os.path.dirname(__file__))
    result_file_path = os.path.join(actual_path, f"../files/result_files/{name_file}.xlsx")
    try:
        wb = openpyxl.load_workbook(result_file_path)
        chart_worksheet = wb.create_sheet(title="Gráfico")
        chart_worksheet['A1'] = "Total Sucess"
        chart_worksheet['A2'] = "Total Failed"
        # NOTE(review): '$Sheet.E:E' with a dot is LibreOffice formula
        # syntax; Excel expects 'Sheet!E:E' -- confirm the target viewer.
        # "Sucess" [sic] must match the spelling written by check_intent.
        chart_worksheet['B1'] = f'=COUNTIF(${ws_result}.E:E;"Sucess")'
        chart_worksheet['B2'] = f'=COUNTIF(${ws_result}.E:E;"Failed")'
        pie_chart = PieChart()
        # NOTE(review): both references span columns A and B; normally the
        # values reference would be column B only and labels column A only
        # -- verify the rendered chart.
        values = Reference(chart_worksheet, min_col=1, min_row = 1,
                           max_col=2, max_row= 2)
        labels = Reference(chart_worksheet, min_col=1, min_row = 1,
                           max_col=2, max_row= 2)
        pie_chart.add_data(values, titles_from_data = True)
        pie_chart.set_categories(labels)
        chart_worksheet.add_chart(pie_chart, "A1")
        wb.save(result_file_path)
    except Exception as e:
        print(e)
62,316 | danielamonteiro/intention-tester | refs/heads/master | /nlp_services/watson.py | import time
import ibm_watson
import sys
sys.path.append("..")
from skills.skill_selector import get_skill_credentials, get_skills_list
# Import-time side effect: prompts the user to choose a skill and loads its
# credentials before any request can be made.  If loading fails the helper
# returns None and this unpacking raises TypeError.
version, skill_id, username, password, url = get_skill_credentials()
def watson_conversation(username, password, version):
    """Create an ibm_watson AssistantV1 client for the given credentials."""
    return ibm_watson.AssistantV1(username=username, password=password, version=version)
def get_watson_response(utterance):
    """Send *utterance* to the Watson assistant, retrying up to 5 times with
    a 1-second pause between attempts.

    Returns the raw response dict, or None when every attempt failed.
    """
    retry = 0
    # BUG FIX: the original returned the name `response` while it was still
    # unbound after 5 failed attempts, raising UnboundLocalError.
    response = None
    while True:
        if retry > 4:
            print("[ATENÇÃO] Não estou conseguindo me conectar com o Watson. Por favor, verifique se as informações do arquivo 'service_credentials.json' está preenchido corretamente e tente novamente")
            break
        try:
            conversation = watson_conversation(username, password, version)
            response = conversation.message(skill_id, input={'text': utterance}).get_result()
            # BUG FIX: the original had `try_again == False` -- a comparison,
            # not an assignment -- so the flag never changed; exit directly.
            break
        except Exception:
            print("Não consegui me conectar com o Watson, 1 segundo vou tentar de novo, pera aí...")
            time.sleep(1)
            retry = retry + 1
    return response
def check_intent(response, expected_intent):
    """Compare Watson's top intent against *expected_intent*.

    Returns a result row:
    [utterance, expected, returned, confidence (2-dp string), verdict].
    With no intents, the returned intent is 'Irrelevant' / confidence '0'.
    """
    utterance = response['input']['text'] if 'input' in response else ""
    intents = response['intents']
    if intents:
        top = intents[0]
        watson_intent = str(top['intent'])
        watson_confidence = str(round(float(top['confidence']), 2))
    else:
        watson_intent = 'Irrelevant'
        watson_confidence = '0'
    # NOTE: "Sucess" [sic] is counted verbatim by create_chart_file's
    # COUNTIF formulas -- keep the spelling in sync.
    verdict = "Sucess" if expected_intent == watson_intent else "Failed"
    return [utterance, expected_intent, watson_intent, watson_confidence, verdict]
62,321 | KIZI/actionrules | refs/heads/master | /actionrules/utilityMining/__init__.py | from .utilityMining import * | {"/actionrules/utilityMining/__init__.py": ["/actionrules/utilityMining/utilityMining.py"]} |
62,322 | KIZI/actionrules | refs/heads/master | /actionrules/utilityMining/utilityMining.py | import pandas as pd
from typing import List
class UtilityMining:
    """Calculates utility values for classification rules.

    This feature can be skipped; in that case the class is not initialized.

    Attributes
    ----------
    utility_function : callable or None
        Function mapping a single keyword argument (column=value) to a
        numeric utility.  Set only when the constructor receives a callable.
    utility_table : pd.DataFrame or None
        DataFrame providing utility values, indexed by '<column>_<value>'
        with the utility in column label 1.  Set only when the constructor
        receives a DataFrame.
    min_util_dif : float
        Minimal desired change in utility caused by an action.
    min_profit : float
        Minimal desired profit.
    """
    def __init__(self,
                 utility_source,
                 min_util_dif: float,
                 min_profit: float
                 ):
        """Initialise.

        Parameters
        ----------
        utility_source : callable or pd.DataFrame
            Function or DataFrame providing utility values.  Anything else
            leaves both sources unset, disabling utility mining.
        min_util_dif : float
            Minimal desired change in utility caused by an action.
        min_profit : float
            Minimal desired profit.
        """
        self.utility_function = None
        self.utility_table = None
        # Exactly one source is kept, depending on the argument's type.
        if isinstance(utility_source, pd.DataFrame):
            self.utility_table = utility_source
        elif callable(utility_source):
            self.utility_function = utility_source
        self.min_util_dif = min_util_dif
        self.min_profit = min_profit
    def _is_source_defined(self):
        # True when at least one utility source (table or function) is usable.
        return isinstance(self.utility_table, pd.DataFrame) or callable(self.utility_function)
    def use_utility_mining(self):
        # Utility mining is active when either threshold is usable.
        return self.use_min_dif() or self.use_min_profit()
    def use_min_dif(self):
        # Minimal-difference filtering needs both a threshold and a source.
        return self.min_util_dif is not None and self._is_source_defined()
    def use_min_profit(self):
        # Minimal-profit filtering needs both a threshold and a source.
        return self.min_profit is not None and self._is_source_defined()
    # NOTE: a former `_check_utility` helper (clamping negative utilities to 0
    # with a warning) was commented out here; see version history if needed.
    def _get_utility(self, **kwargs):
        """Sum the utilities of the given attribute/value pairs.

        Parameters
        ----------
        **kwargs : dict
            Attribute names and values of a classification rule.

        Returns
        -------
        float
            Sum of the utility values, or 0 when no source is defined.
        """
        if callable(self.utility_function):
            utility = 0
            # The function is called once per attribute with a single
            # keyword argument (attribute=value).
            for key, value in kwargs.items():
                param = {}
                param[key] = value
                utility += self.utility_function(**param)
            return utility
        if isinstance(self.utility_table, pd.DataFrame):
            utility = 0
            for key, value in kwargs.items():
                # Table rows are expected to be indexed '<column>_<value>'.
                index = key + '_' + value
                try:
                    # NOTE(review): looks up column label 1 (integer), not the
                    # second positional column -- confirm the table schema.
                    utility += self.utility_table.at[index, 1]
                except KeyError:
                    print('Warning - key error at index ', index)
            return utility
        return 0
    def calculate_utilities(self, flex: pd.DataFrame, target: pd.DataFrame):
        """Compute per-row utilities for flexible and target attributes.

        Parameters
        ----------
        flex : pd.DataFrame
            Flexible attributes, one row per classification rule.  (The
            original annotation said List[pd.DataFrame], but the body uses
            `.columns`/`.at`, i.e. a single DataFrame.)
        target : pd.DataFrame
            Target attribute; only the first column is used.

        Returns
        -------
        tuple of (list, list)
            Utilities of the flexible attributes and of the target
            attribute, row by row.
        """
        if not self._is_source_defined():
            raise Exception('No utility source defined')
        utilities_flex = []
        utilities_target = []
        for i in range(len(flex)):
            # utility of flexible attributes; 'nan' cells are skipped
            params = {}
            for col in flex.columns:
                val = str(flex.at[i, col]).lower()
                if val != 'nan':
                    params[col] = val
            util_flex = self._get_utility(**params)
            # utility of the (single-column) target attribute
            params = {}
            col = target.columns[0]
            val = target.at[i, col].lower()
            params[col] = val
            util_target = self._get_utility(**params)
            # adds tuple of flexible and target utilities to the return lists
            utilities_flex.append(float(util_flex))
            utilities_target.append(float(util_target))
        return utilities_flex, utilities_target
| {"/actionrules/utilityMining/__init__.py": ["/actionrules/utilityMining/utilityMining.py"]} |
62,323 | KIZI/actionrules | refs/heads/master | /setup.py | import setuptools
import re
def get_property(prop, project):
    """Read a module-level string constant from *project*'s ``__init__.py``.

    Parameters
    ----------
    prop : str
        Name of the property, e.g. ``'__version__'``.
    project : str
        Directory containing the package's ``__init__.py``.

    Returns
    -------
    str
        The quoted value assigned to *prop*.  Raises AttributeError if the
        property is not found (``re.search`` returns None).
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original left it to the garbage collector).
    with open(project + '/__init__.py') as fh:
        content = fh.read()
    result = re.search(r'{}\s*=\s*[\'"]([^\'"]*)[\'"]'.format(prop), content)
    return result.group(1)
# PyPI long description is the README, verbatim.
with open("README.md", "r") as fh:
    long_description = fh.read()
# Package metadata; the version is read from actionrules/__init__.py so it
# lives in exactly one place.
setuptools.setup(
    name="actionrules-lukassykora",
    version=get_property('__version__', "actionrules"),
    author="Lukas Sykora",
    author_email="lukassykora@seznam.cz",
    description="Action rules mining package",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/lukassykora/actionrules",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
    install_requires=[
        'pandas',
        'numpy',
        'pyfim'
    ],
)
62,324 | soulweaver91/soulbot-slack | refs/heads/master | /services/words.py | import random
# Word pool loaded once at import time: base list plus an addendum file,
# one word per line.  Paths are relative to the process working directory.
dictionary = [line.rstrip('\r\n') for line in open('data/common/wordpool.txt', encoding="utf-8")] + \
             [line.rstrip('\r\n') for line in open('data/common/wordpool_addendum.txt', encoding="utf-8")]
class WordService:
    """Hands out random words from the module-level word pool."""

    def get_random_word(self):
        """
        Gets a random word from the dictionary.

        :return: A random word as a string.
        """
        # random.choice is the idiomatic way to draw one element;
        # the original's random.sample(dictionary, 1)[0] built a
        # throwaway one-element list for the same uniform draw.
        return random.choice(dictionary)
| {"/plugins/shipping.py": ["/services/words.py", "/services/users.py"], "/plugins/hello.py": ["/services/users.py"]} |
62,325 | soulweaver91/soulbot-slack | refs/heads/master | /rtmbot.py | #!/usr/bin/env python
from rtmbot.bin.run_rtmbot import main as rtmbotMain
class FakeArgs:
    """Minimal stand-in for rtmbot's argparse namespace.

    Only the ``config`` attribute is supported; looking up anything else
    raises ValueError, exactly like the original implementation.
    """

    def __getattr__(self, item):
        if item != 'config':
            raise ValueError
        return 'soulbot.conf'
# Entry point: run rtmbot with our fixed config path instead of CLI parsing.
if __name__ == "__main__":
    rtmbotMain(FakeArgs())
| {"/plugins/shipping.py": ["/services/words.py", "/services/users.py"], "/plugins/hello.py": ["/services/users.py"]} |
62,326 | soulweaver91/soulbot-slack | refs/heads/master | /plugins/downcheck.py | import requests
import re
from rtmbot.core import Plugin
API_URL = r'https://downforeveryoneorjustme.com/'
class UnexpectedResponseError(RuntimeError):
    """Raised when the isup.me response contains none of the expected status markers."""
    pass
class SoulbotDownCheckPlugin(Plugin):
    """Plugin answering `!isup`/`!isdown`/`!down` by querying downforeveryoneorjustme.com."""
    def isup_site(self, data):
        """
        Checks whether the given domain (or URL) is down by using the isup.me site.
        :param data: RTM message.
        :return: None
        """
        if len(data["soulbot_args_shlex"]) > 0:
            location = data["soulbot_args_shlex"][0]
            # Remove the protocol from the request, as isup.me doesn't understand them properly,
            # and then remove everything starting from the first slash
            # urllib.parse.urlparse doesn't work well for us here because if no protocol is specified
            # it won't parse the domain as its own (because it seems like a relative url instead)
            # Also, Slack's link format probably kicks in with most links anyway, which we're also dealing here.
            # First sub strips Slack's `<url|label>` wrapping, keeping the url part.
            location = re.sub(r'^<([^|]+)(\|.+?>)?', r'\1', location)
            location = re.sub(r'^[a-z]+://', '', location)
            location = re.sub(r'/.+', '', location)
            r = requests.get(API_URL + requests.utils.quote(location))
            try:
                if r.status_code == 200:
                    status = r.text
                    # NOTE(review): status detection is keyed to exact phrases
                    # in the third-party page HTML and will break if the site
                    # changes its wording.
                    if "It's just you" in status or "If you can see this page and still think we're down" in status:
                        return self.outputs.append([data["channel"],
                                                    '{} seems to be *up*, so it\'s just you. :+1:'.format(location)])
                    elif "looks down from here" in status:
                        return self.outputs.append([data["channel"],
                                                    '{} seems to be *down*, not just for you. :-1:'.format(location)])
                raise UnexpectedResponseError
            except (KeyError, AttributeError, UnexpectedResponseError):
                return self.outputs.append([data["channel"], 'ERROR: Check failed :cry: Please try again later!'])
        else:
            return self.outputs.append([data["channel"], 'Please tell me the site to check first.'])
    def process_message(self, data):
        # Dispatch: all three aliases behave identically.
        if data["soulbot_command"] in ['isup', 'isdown', 'down']:
            return self.isup_site(data)
def get_module_help():
    """Describe the commands this module responds to."""
    return (
        '`!down domain`, `!isup domain` or `!isdown domain`: Check if a domain '
        'is accessible from another network than your own.'
    )
| {"/plugins/shipping.py": ["/services/words.py", "/services/users.py"], "/plugins/hello.py": ["/services/users.py"]} |
62,327 | soulweaver91/soulbot-slack | refs/heads/master | /plugins/choices.py | import random
from rtmbot.core import Plugin
# Canned reply templates, one per line; each template carries a `{}` slot
# that the chosen item is formatted into.
helpful_replies = [line.rstrip('\r\n') for line in open('data/choices/helpfuls.txt')]
hazy_replies = [line.rstrip('\r\n') for line in open('data/choices/hazies.txt')]
# Probability of answering with a helpful template rather than a hazy one.
helpful_ratio = 0.95
class SoulbotChoicesPlugin(Plugin):
    """Plugin for `!choose`/`!choice` (random pick) and `!die`/`!dice` (die roll)."""
    def random_choice(self, data):
        """
        Chooses a random item from the shlex-split argument list.
        :param data: RTM message.
        :return: None
        """
        # Drop empty/whitespace-only arguments before choosing.
        items = list(filter(None, [s.strip() for s in data["soulbot_args_shlex"]]))
        # With probability 1 - helpful_ratio (and more than one option),
        # answer with a deliberately unhelpful "hazy" template.
        if len(items) > 1 and random.random() > helpful_ratio:
            reply = random.choice(hazy_replies)
        else:
            reply = random.choice(helpful_replies)
        if len(items) > 1:
            return self.outputs.append([data["channel"], reply.format(random.choice(items))])
        elif len(items) == 1:
            return self.outputs.append([data["channel"], reply.format('Try more options next time')])
        else:
            return self.outputs.append([data["channel"], reply.format('Give me something to choose from first')])
    def throw_dice(self, data):
        """
        Throws an n-sided die (default 6 sides).
        :param data: RTM message
        :return: None
        """
        if len(data["soulbot_args_space"]) > 0:
            try:
                sides = int(data["soulbot_args_space"][0])
            except (TypeError, ValueError):
                return self.outputs.append([data["channel"], "That doesn't seem like a number of sides."])
        else:
            sides = 6
        if sides <= 0:
            return self.outputs.append([data["channel"], "Please provide a positive number of sides."])
        return self.outputs.append([data["channel"], "You rolled a {}!".format(random.randint(1, sides))])
    def process_message(self, data):
        # Dispatch on the parsed command keyword.
        if data["soulbot_command"] in ['choose', 'choice']:
            return self.random_choice(data)
        elif data["soulbot_command"] in ['die', 'dice']:
            return self.throw_dice(data)
def get_module_help():
    """Describe the commands this module responds to."""
    return (
        '`!choice` or `!choose`: Pick a random item. Use quotes for choices with multiple words.'
        '\n'
        '`!die` or `!dice`: Throw a die. Optionally, provide the number of sides: `!die 20`'
    )
| {"/plugins/shipping.py": ["/services/words.py", "/services/users.py"], "/plugins/hello.py": ["/services/users.py"]} |
62,328 | soulweaver91/soulbot-slack | refs/heads/master | /plugins/weather.py | import requests
import json
from rtmbot.core import Plugin, Job
API_URL = r'https://api.wunderground.com/api/'
class UnexpectedResponseError(RuntimeError):
    """Raised when the Wunderground API response matches no known shape."""
    pass
class ClearCooldownJob(Job):
    """Periodic rtmbot job that resets the weather plugin's rate-limit counter."""
    # Back-reference to the owning SoulbotWeatherPlugin; assigned in __init__.
    parent = None
    def __new__(cls, parent_plugin, interval):
        # NOTE(review): __new__ is overridden presumably to swallow the extra
        # constructor arguments before object.__new__ sees them -- confirm
        # against the rtmbot Job base class.
        return super(ClearCooldownJob, cls).__new__(cls)
    def __init__(self, parent_plugin, interval):
        # Schedule via the Job base class and remember the owning plugin.
        super().__init__(interval)
        self.parent = parent_plugin
    def run(self, slack_client):
        # Invoked by the rtmbot scheduler every `interval` seconds.
        self.parent.api_cooldown = 0
        print("cooldown cleared")
class SoulbotWeatherPlugin(Plugin):
    """Plugin answering `!weather` with a five-day Wunderground forecast."""
    # Count of API calls since the last cooldown reset; ClearCooldownJob
    # zeroes it every 120 seconds, and get_forecast refuses above 5.
    api_cooldown = 0
    def __init__(self, name=None, slack_client=None, plugin_config=None):
        """Keep the plugin config; it must contain WUNDERGROUND_API_KEY."""
        super().__init__(name=name, slack_client=slack_client, plugin_config=plugin_config)
        self.weather_config = plugin_config
    def register_jobs(self):
        # Reset the rate-limit counter every two minutes.
        self.jobs.append(ClearCooldownJob(self, 120))
    @staticmethod
    def get_forecast_emoji(condition_str):
        """Map a Wunderground icon keyword to a Slack weather emoji."""
        if condition_str in ['chanceflurries', 'flurries', 'chancesnow', 'snow', 'chancesleet', 'sleet']:
            return ':snow_cloud:'
        elif condition_str in ['chancerain', 'rain']:
            return ':rain_cloud:'
        elif condition_str in ['chancetstorms', 'tstorms']:
            return ':thunder_cloud_and_rain:'
        elif condition_str in ['clear', 'sunny']:
            return ':sunny:'
        elif condition_str == 'cloudy':
            return ':cloud:'
        elif condition_str in ['fog', 'hazy']:
            return ':fog:'
        elif condition_str == 'mostlycloudy':
            return ':barely_sunny:'
        elif condition_str == 'mostlysunny':
            return ':mostly_sunny:'
        elif condition_str in ['partlycloudy', 'partlysunny']:
            return ':partly_sunny:'
        else:
            # Unknown icon keyword -- make it visible rather than crash.
            return ':question:'
    @staticmethod
    def get_forecast_dict(title, conditions_icon, conditions, temp_low_c, temp_low_f, wind_dir, wind_kph, wind_mph,
                          prec_mm, prec_in, temp_high_c=None, temp_high_f=None):
        """Build one Slack attachment field describing a day's forecast.

        When both high temperatures are given the value shows a low-high
        range; otherwise only the single (current) temperature.
        """
        if temp_high_c is not None and temp_high_f is not None:
            temp_str = '{}–{}°C ({}–{}°F)'.format(temp_low_c, temp_high_c, temp_low_f, temp_high_f)
        else:
            temp_str = '{}°C ({}°F)'.format(temp_low_c, temp_low_f)
        return {
            "title": title,
            "value": '{} {}, {} :dash: From {} {}kph ({}mph) :droplet: {}mm ({}in)'.format(
                SoulbotWeatherPlugin.get_forecast_emoji(conditions_icon),
                conditions,
                temp_str,
                wind_dir,
                wind_kph,
                wind_mph,
                prec_mm,
                prec_in
            ),
            "short": False
        }
    @staticmethod
    def get_forecast_dict_for_current(data):
        """Field for today's conditions, from the 'current_observation' payload."""
        return SoulbotWeatherPlugin.get_forecast_dict(
            title='Today',
            conditions=data["weather"],
            conditions_icon=data["icon"],
            temp_low_c=data["temp_c"],
            temp_low_f=data["temp_f"],
            wind_dir=data["wind_dir"],
            wind_kph=data["wind_kph"],
            wind_mph=data["wind_mph"],
            prec_mm=data["precip_today_metric"],
            prec_in=data["precip_today_in"]
        )
    @staticmethod
    def get_forecast_dict_for_day(data):
        """Field for one future day, from a 'simpleforecast' forecastday entry."""
        return SoulbotWeatherPlugin.get_forecast_dict(
            title=data["date"]["weekday"],
            conditions=data["conditions"],
            conditions_icon=data["icon"],
            temp_low_c=data["low"]["celsius"],
            temp_low_f=data["low"]["fahrenheit"],
            temp_high_c=data["high"]["celsius"],
            temp_high_f=data["high"]["fahrenheit"],
            wind_dir=data["avewind"]["dir"],
            wind_kph=data["avewind"]["kph"],
            wind_mph=data["avewind"]["mph"],
            prec_mm=data["qpf_allday"]["mm"],
            prec_in=data["qpf_allday"]["in"]
        )
    def get_forecast(self, data):
        """
        Fetches a five-day forecast from Wunderground API.
        :param data: RTM message.
        :return: None
        """
        if "WUNDERGROUND_API_KEY" not in self.weather_config:
            return self.outputs.append([data["channel"], 'ERROR: The weather plugin is improperly configured.'])
        # Local rate limiting: at most 5 API calls per cooldown window.
        if self.api_cooldown >= 5:
            return self.outputs.append([data["channel"],
                                        'The weather API is rate limited. '
                                        'Please wait for a moment before making another request.'])
        if len(data["soulbot_args_shlex"]) > 0:
            location = data["soulbot_args_shlex"][0]
            r = requests.get(API_URL + self.weather_config["WUNDERGROUND_API_KEY"]
                             + '/geolookup/conditions/forecast/q/'
                             + location + '.json')
            self.api_cooldown += 1
            try:
                if r.status_code == 200:
                    forecast = r.json()
                    # Three response shapes: unique match, ambiguous list,
                    # or an error object.
                    if 'location' in forecast and 'forecast' in forecast and 'current_observation' in forecast:
                        found_location = forecast["location"]
                        # Human-readable header: flag, city[, state], country, coordinates.
                        location_name = ':flag-{}: {}{}, {} ({:.2f}°{}, {:.2f}°{})'.format(
                            found_location["country_iso3166"].lower(),
                            found_location["city"],
                            (", " + found_location["state"] if len(found_location["state"]) > 0 else ""),
                            found_location["country_name"],
                            abs(float(found_location["lat"])),
                            ("N" if float(found_location["lat"]) > 0 else "S"),
                            abs(float(found_location["lon"])),
                            ("E" if float(found_location["lon"]) > 0 else "W")
                        )
                        # Today first, then one field per forecast day.
                        forecast_dicts = \
                            [self.get_forecast_dict_for_current(forecast["current_observation"])] + \
                            [self.get_forecast_dict_for_day(day_data) for day_data in
                             forecast["forecast"]["simpleforecast"]["forecastday"]]
                        message_json = json.dumps([
                            {
                                "fallback": "Required plain-text summary of the attachment. TODO.",
                                "title": location_name,
                                "title_link": found_location["wuiurl"],
                                "fields": forecast_dicts,
                                "footer": "Weather Underground",
                                "ts": int(forecast["current_observation"]["local_epoch"])
                            }
                        ])
                        # Rich attachments require the Web API, not the RTM outputs list.
                        return self.slack_client.api_call('chat.postMessage', channel=data["channel"],
                                                          attachments=message_json, as_user=True)
                    elif 'response' in forecast and 'results' in forecast["response"]:
                        # Ambiguous query: list the candidate zmw codes.
                        location_texts = [
                            '`zmw:{}`: :flag-{}: {}{}, {}'.format(
                                location["zmw"],
                                location["country_iso3166"].lower(),
                                location["name"],
                                (", " + location["state"] if len(location["state"]) > 0 else ""),
                                location["country_name"]
                            )
                            for location in forecast["response"]["results"]
                        ]
                        return self.outputs.append([
                            data["channel"],
                            'Multiple locations named \'{}\' were found.\n'.format(location) +
                            '\n'.join(location_texts)
                        ])
                    elif 'response' in forecast and 'error' in forecast["response"]:
                        if forecast["response"]["error"]["type"] == 'querynotfound':
                            return self.outputs.append([
                                data["channel"],
                                'No locations called \'{}\' were found.'.format(location)
                            ])
                    raise UnexpectedResponseError
            except (KeyError, AttributeError, UnexpectedResponseError) as e:
                print(e)
                return self.outputs.append([data["channel"],
                                            'ERROR: Could not get the forecast :cry: Please try again later!'])
        else:
            return self.outputs.append([data["channel"], 'Please provide a location for the forecast first.'])
    def process_message(self, data):
        if data["soulbot_command"] == 'weather':
            return self.get_forecast(data)
def get_module_help():
    """Describe the commands this module responds to."""
    return (
        '`!weather location` or `!weather zmw:00000.0.00000`: '
        'Get a five-day forecast in the given location. '
        "You can either use a location's name directly or a specific "
        'location code shown if the location name was ambiguous.'
    )
| {"/plugins/shipping.py": ["/services/words.py", "/services/users.py"], "/plugins/hello.py": ["/services/users.py"]} |
62,329 | soulweaver91/soulbot-slack | refs/heads/master | /plugins/shipping.py | import re
import random
from services.words import WordService
from services.users import UserService
from rtmbot.core import Plugin
class SoulbotShippingPlugin(Plugin):
    """Plugin that invents parody "shipping" names for channel members on `!shipme`."""
    # Shared word source; class-level so one pool serves all instances.
    wordsvc = WordService()
    def __init__(self, name=None, slack_client=None, plugin_config=None):
        """Forward rtmbot's plugin arguments and set up the user-name lookup service."""
        super().__init__(name=name, slack_client=slack_client, plugin_config=plugin_config)
        self.usersvc = UserService(client=slack_client)
    def process_message(self, data):
        """
        Creates a shipping name out of users of the channel or the given names.
        :param data: RTM message.
        :return: None
        """
        if data["soulbot_command"] == 'shipme':
            args = data["soulbot_args_shlex"]
            # Build a name of 1-3 random capitalized words plus 'Shipping'.
            shipname = []
            for i in range(0, random.randint(1, 3)):
                shipname.append(self.wordsvc.get_random_word().capitalize())
            shipname.append('Shipping')
            shipname = ''.join(shipname)
            # No arguments means "ship me with one other person".
            if len(args) == 0:
                args.append('2')
            if re.match(r'^\d+$', args[0]) and len(args) == 1:
                # Numeric form: ship the sender with (count - 1) random members.
                count = int(args[0])
                if count <= 1:
                    return self.outputs.append([data["channel"],
                                                'Please specify a number greater or equal to 2. :dansgame:'])
                users = self.usersvc.get_channel_user_names(data["channel"])
                own_name = self.usersvc.get_user_name(data["user"])
                # The sender is always included, so remove them from the pool.
                try:
                    users.remove(own_name)
                except ValueError:
                    pass
                self.outputs.append([data["channel"], ' × '.join(
                    [own_name] + random.sample(users, min(len(users), count - 1))
                ) + ' = ' + shipname])
            elif len(args) > 1:
                # Explicit name list: ship exactly the given names.
                self.outputs.append([data["channel"], ' × '.join(args) + ' = ' + shipname])
            else:
                # Single non-numeric argument: can't ship one person alone.
                self.outputs.append([data["channel"], 'Please name a partner for {} first :lenny:'.format(args[0])])
def get_module_help():
    """Describe the commands this module responds to."""
    return (
        '`!shipme`: Creates a new random parody shipping name between you and '
        'another person on the channel. Optionally, provide a number to ship '
        'that many people in total, including you, or provide a list of '
        'names to ship instead.'
    )
| {"/plugins/shipping.py": ["/services/words.py", "/services/users.py"], "/plugins/hello.py": ["/services/users.py"]} |
62,330 | soulweaver91/soulbot-slack | refs/heads/master | /plugins/startup.py | outputs = []
# Reserved for future use.
| {"/plugins/shipping.py": ["/services/words.py", "/services/users.py"], "/plugins/hello.py": ["/services/users.py"]} |
62,331 | soulweaver91/soulbot-slack | refs/heads/master | /services/users.py | class UserService:
    def __init__(self, client=None):
        """Wrap the Slack client instance used for user and channel lookups."""
        self.sc = client
def get_user_name(self, user_id):
"""
Retrieves a user's real name by their internal ID.
:param user_id: The internal Slack user ID.
:return: The user's set up real name, or if some reason one cannot be found, the same user ID returned back.
"""
try:
user = self.sc.server.users.find(user_id)
return user.real_name
except AttributeError:
return user_id
def get_channel_user_names(self, channel_id):
"""
Retrieves the names of the users on a channel.
:param channel_id: The internal Slack channel ID.
:return: The real names of the users on the channel, if available. If the channel doesn't exist, an empty list.
"""
try:
channel_users = self.sc.server.channels.find(channel_id)
return [self.get_user_name(user) for user in channel_users.members]
except AttributeError:
return []
| {"/plugins/shipping.py": ["/services/words.py", "/services/users.py"], "/plugins/hello.py": ["/services/users.py"]} |
62,332 | soulweaver91/soulbot-slack | refs/heads/master | /plugins/help.py | import importlib
import os
from rtmbot.core import Plugin
class SoulbotHelpPlugin(Plugin):
    """Plugin that assembles the `!help` overview from every plugin module."""

    def process_message(self, data):
        """
        Retrieves the help text from each module and prints them.

        The help text must be returned from a function called "get_module_help".
        If the function is not found, no help for that module will be printed.
        :param data: RTM message.
        :return: None
        """
        if data["soulbot_command"] != 'help':
            return
        sections = []
        for filename in os.listdir('plugins'):
            if filename[-3:] != '.py':
                continue
            modname = filename[:-3]
            mod = importlib.import_module('plugins.' + modname)
            helper = getattr(mod, 'get_module_help', None)
            if helper is not None:
                sections.append('_Module \'' + modname + '\':_\n' + helper())
        self.outputs.append([
            data["channel"],
            '*SoulBot Commands*\n' + '\n\n'.join(sections)
        ])
def get_module_help():
    """Describe the commands this module responds to."""
    help_text = '`!help`: Print this help.'
    return help_text
| {"/plugins/shipping.py": ["/services/words.py", "/services/users.py"], "/plugins/hello.py": ["/services/users.py"]} |
62,333 | soulweaver91/soulbot-slack | refs/heads/master | /plugins/hello.py | from services.users import UserService
from rtmbot.core import Plugin
class SoulbotHelloPlugin(Plugin):
    """Plugin that greets anyone whose message contains "hello"."""

    def __init__(self, name=None, slack_client=None, plugin_config=None):
        """Forward rtmbot's plugin arguments and set up the user-name lookup service."""
        super().__init__(name=name, slack_client=slack_client, plugin_config=plugin_config)
        self.usersvc = UserService(client=slack_client)

    def process_message(self, data):
        """Reply with a personalised greeting when the message mentions "hello"."""
        if "hello" not in data["text"]:
            return
        real_name = self.usersvc.get_user_name(data["user"])
        self.outputs.append([data["channel"], "Hello, {}!".format(real_name)])
def get_module_help():
    """Describe what this module responds to."""
    return "Any message with 'hello': Get greeted."
| {"/plugins/shipping.py": ["/services/words.py", "/services/users.py"], "/plugins/hello.py": ["/services/users.py"]} |
62,342 | jkgiesler/polygon-test | refs/heads/master | /modified_models.py | from sqlalchemy import Column, Integer, String, Enum, Float, Text, DateTime, func, Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, backref
from sqlalchemy import ForeignKey
from sqlalchemy import create_engine
from passlib.apps import custom_app_context as pwd_context
# Declarative base shared by all ORM models in this module.
Base = declarative_base()
class User(Base):
    """Registered user; owns created questions and question attempts."""
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    date_joined = Column(DateTime, default=func.now())
    email = Column(String(32), index=True)
    password_hash = Column(String(64))
    created_questions = relationship('Question')
    question_attempts = relationship('Attempt')
    def hash_password(self, password):
        # Store only the passlib hash, never the plaintext password.
        self.password_hash = pwd_context.encrypt(password)
    def verify_password(self, password):
        # Check a plaintext candidate against the stored hash.
        return pwd_context.verify(password, self.password_hash)
    @property
    def serialize(self):
        # NOTE(review): this exposes the password hash under 'password' --
        # confirm serialized users are never sent to untrusted clients.
        return {
            'id': self.id,
            'date_joined': str(self.date_joined),
            'email': self.email,
            'password': self.password_hash,
            #created questions needs to be a string not a dict
            'created_questions': "|".join([str(i.serialize['id'])+" "+i.serialize['book_id'] for i in self.created_questions]),
            'question_attempts': "|".join([str(i.serialize['question_id']) + " "+str(i.serialize['id']) + " "+str(i.serialize['correct']) for i in self.question_attempts])
        }
class Attempt(Base):
    """One user's answer attempt at a question, recording the option guessed."""
    __tablename__ = 'attempt'
    id = Column(Integer, primary_key=True)
    date_attempted = Column(DateTime, default=func.now())
    guessed = Column(Integer, ForeignKey('option.id'))
    question_id = Column(Integer, ForeignKey('question.id'))
    user_id = Column(Integer, ForeignKey('user.id'))
    correct = Column(Boolean)
    @property
    def serialize(self):
        # Note: 'guessed' is deliberately not included in the serialized form.
        return {
            'id': self.id,
            'date_attempted': self.date_attempted,
            'question_id': self.question_id,
            'user_id': self.user_id,
            'correct': self.correct
        }
#Every book will have questions. These will be multiple choice and might have more than four options.
#Every book will have questions. These will be multiple choice and might have more than four options.
class Question(Base):
    """Multiple-choice question attached to a book (by external book id)."""
    __tablename__ = 'question'
    id = Column(Integer, primary_key=True)
    book_id = Column(String(30)) #This will talk to the google books api and we can get the cover art and stuff.
    create_by = Column(Integer, ForeignKey('user.id'))
    date_modified = Column(DateTime, default=func.now()) #Do we need to keep track of modified AND created?
    options = relationship('Option')
    attempts = relationship('Attempt')
    explanation = Column(String(1000)) #We might want to make this a separate table and include an optional picture link
    likes = Column(Integer)
    dislikes = Column(Integer)
    @property
    def serialize(self):
        return {
            'id': self.id,
            'book_id': self.book_id,
            'create_by': self.create_by,
            # Options are flattened to a tab-separated string of option ids.
            'options': "\t".join([str(i.serialize['id']) for i in self.options]), # how is this really supposed to be?
            'date_modified': str(self.date_modified),
            'explanation': self.explanation,
            'likes': self.likes,
            'dislikes': self.dislikes
        }
#Every option is related to a question and should be able to be edited or removed from that question.
#Every option is related to a question and should be able to be edited or removed from that question.
class Option(Base):
    """One selectable answer option belonging to a question."""
    __tablename__ = 'option'
    id = Column(Integer, primary_key=True)
    date_modified = Column(DateTime, default=func.now()) #Do we need to keep track of modified AND created?
    question_id = Column(Integer, ForeignKey('question.id'))
    correct = Column(Boolean)
    content = Column(String(1000)) #We might want an optional picture link later.
    @property
    def serialize(self):
        # Note: 'correct' is deliberately omitted so clients can't see answers.
        return {
            'id': self.id,
            'date_modified': str(self.date_modified),
            'question_id': self.question_id,
            'content': self.content
        }
# Import side effect: connects to the SQLite file and creates any missing
# tables for the models declared above (echo=True logs all SQL).
engine = create_engine('sqlite:///bookquestions.db', echo=True)
Base.metadata.create_all(engine)
62,343 | jkgiesler/polygon-test | refs/heads/master | /insertion.py | from flask import Flask, request, jsonify
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from modified_models import User, Question, Option, Attempt
# NOTE(review): a Flask app is created but never run in this seed script.
app = Flask(__name__)
Base = declarative_base()
engine = create_engine('sqlite:///bookquestions.db', echo=True)
#Base.metadata.create_all(engine)
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Seed data: two users with hashed passwords, two questions with their
# options, and a handful of attempts (foreign keys reference the row ids
# that the autoincrement sequence will assign on commit).
user_one = User(email = 'jason.giesler@gmail.com')
user_one.hash_password('thisissecure')
user_two = User(email = 'bobloblaw@bobloblawlawblog.com')
user_two.hash_password('mouthfull')
question_one = Question(book_id="Bob Loblaw's Law Blog",create_by = 2,explanation = "How do I file for divorce?",likes = 30,dislikes = 10)
q1_option1 = Option(question_id = 1,content = "talk to a lawyer",correct = True)
q1_option2 = Option(question_id = 1, content = "talk to a pastor",correct = False)
question_two = Question(book_id="Bob Loblaw's Law Blog",create_by = 2,explanation = "How do I file a restraining order",likes = 5,dislikes = 50)
q2_option1 = Option(question_id = 2,content = "file a police report",correct = True)
q2_option2 = Option(question_id = 2,content = "buy a handgun",correct = False)
q1_attempt1 = Attempt(question_id = 1,user_id = 1, guessed = 1, correct = False)
q1_attempt2 = Attempt(question_id = 1,user_id = 1, guessed = 1, correct = False)
q1_attempt3 = Attempt(question_id = 1,user_id = 1, guessed = 2, correct = True)
q2_attempt1 = Attempt(question_id = 2,user_id = 1, guessed = 4, correct = False)
q2_attempt2 = Attempt(question_id = 2,user_id = 1, guessed = 3, correct = True)
# Single transaction: everything is inserted atomically.
session.add_all([user_one,user_two,question_one,question_two,
    q1_option1,q1_option2,q2_option1,q2_option2,q1_attempt1,q1_attempt2,q1_attempt3,
    q2_attempt1,q2_attempt2])
session.commit()
| {"/insertion.py": ["/modified_models.py"], "/modified_endpoints.py": ["/modified_models.py"]} |
62,344 | jkgiesler/polygon-test | refs/heads/master | /modified_endpoints.py | from flask import Flask, request, jsonify
import json
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from modified_models import User, Question, Option, Attempt
app = Flask(__name__)
# NOTE(review): this Base is a fresh declarative_base, separate from the one
# the models in modified_models use -- binding it here has no effect on them.
Base = declarative_base()
engine = create_engine('sqlite:///bookquestions.db', echo=True)
#Base.metadata.create_all(engine)
Base.metadata.bind = engine
# One module-level session shared by every request handler below.
DBSession = sessionmaker(bind=engine)
session = DBSession()
@app.route("/user/<userid>")
def user(userid):
    """Return the user with the given id, serialized as a JSON string."""
    return getUser(userid)
@app.route("/questions/<bookid>",methods = ['GET','POST'])
def book_questions(bookid):
    """GET: list a book's questions as JSON.  POST: create a new question.

    POST expects form fields 'name' (book id) and 'question' (explanation
    text) and returns the literal string "success" on creation.
    """
    if request.method == 'GET':
        return getBookQuestions(bookid)
    elif request.method == 'POST':
        data = request.form
        book_val = data['name']
        explanation_val = data['question']
        # TODO(review): create_by is hard-coded to user 2 -- should come
        # from authentication once it exists.
        to_input = Question(book_id = book_val,create_by = 2,explanation = explanation_val,likes = 1,dislikes = 0)
        session.add(to_input)
        session.commit()
        return "success"
@app.route("/options/<questionid>")
def question_options(questionid):
    """Return the options of the given question, serialized as a JSON string."""
    return getQuestionOptions(questionid)
'''
@app.route("/testpush")
def push_data():
data = Question(123,342,'This is a test',12,1)
session.add(data)
session.commit()
query = session.query(Question)
return jsonify(Question = [i.serialize for i in query])
'''
def getUser(userid):
    """Serialize every User row matching *userid* into a JSON array string."""
    matching = session.query(User).filter(User.id == userid)
    serialized = [row.serialize for row in matching]
    return json.dumps(serialized)
def getBookQuestions(bookid):
    """Serialize every Question belonging to *bookid* into a JSON array string."""
    matching = session.query(Question).filter(Question.book_id == bookid)
    serialized = [row.serialize for row in matching]
    return json.dumps(serialized)
def getQuestionOptions(questionid):
    """Serialize every Option of question *questionid* into a JSON array string."""
    matching = session.query(Option).filter(Option.question_id == questionid)
    serialized = [row.serialize for row in matching]
    return json.dumps(serialized)
if __name__ == '__main__':
    # Development server: debug mode, reachable on all interfaces, port 5555.
    app.debug = True
    app.run(host='0.0.0.0', port=5555)
62,345 | Wopros46527351/project_now | refs/heads/main | /scraper.py | from bs4 import BeautifulSoup
import requests
def get_info(url):
    """Scrape price, availability and name from a CHIPDIP product page.

    Args:
        url (str): url of product

    Returns:
        tuple: (price, count, name) -- price and count as floats, name with
        commas replaced by dots.
    """
    page = requests.get(url)
    soup = BeautifulSoup(page.text, 'html.parser')
    price_node = soup.find_all("span", {"class": 'ordering__value'})[0]
    count_node = soup.find_all("span", {"class": 'item__avail item__avail_available item__avail_float'})[0]
    name = soup.find("h1", itemprop='name').text.replace(",", ".")
    # Keep only ASCII digits from the availability text before converting.
    digits = "".join(ch for ch in count_node.text if ch in "0123456789")
    count_value = float(digits)
    price_value = float(price_node.text)
    return (price_value, count_value, name)
if __name__ == "__main__":
    # Guard against direct execution: this module only provides get_info().
    print("This file is not supposed to be launched by user.Press ENTER to exit")
    input()
62,346 | Wopros46527351/project_now | refs/heads/main | /bd.py | from pymongo import MongoClient
# pprint library is used to make the output look more pretty
from pprint import pprint
# connect to MongoDB, change the << MONGODB URL >> to reflect your own connection string
'''client = MongoClient("mongodb+srv://Biba_buba_13:Vgfgh4335RTF@huyaster.bi6ms.mongodb.net/myFirstDatabase?retryWrites=true&w=majority")
db = client.test
# Issue the serverStatus command and print the results
serverStatusResult=db.command("serverStatus")
pprint(serverStatusResult)'''
def make_push(db,product_name, date, product_number, product_price, url):
    """Insert or append one price/stock observation for a product.

    A new document is created on first sight of *url*; afterwards the date,
    quantity and price arrays of the existing document are appended to.

    Args:
        db: MongoDB database handle.
        product_name (str): name of the product
        date (str): date of request
        product_number (float): quantity of the product
        product_price (float): current price
        url (str): product URL; acts as the de-facto document key

    Returns:
        None
    """
    # NOTE(review): Cursor.count() was deprecated and removed in PyMongo 4;
    # count_documents({'url': url}) is the modern equivalent -- confirm the
    # pinned pymongo version before upgrading.
    if db.product_mstator.find({'url':url}).count()==0:
        push = {
            "product_name": product_name,
            "date": [date],
            'product_number': [product_number],
            'product_price': [product_price],
            'url': url
        }
        db.product_mstator.insert_one(push)
    else:
        # Append the new observation to each parallel array.
        db.product_mstator.update_one({'url':url},{'$push': {
            'date':date,
            'product_number':product_number,
            'product_price':product_price
        }})
    return None
def make_pull(db, url):
    """Fetch the stored document for the product at *url*.

    Args:
        db: pymongo database handle.
        url (str): url of the product.

    Returns:
        dict | None: the product's document, or ``None`` when unknown.
    """
    return db.product_mstator.find_one({"url": url})
def push_meta(db, date):
    """Record *date* in the meta collection and refresh the url snapshot.

    Maintains two meta documents: ``dates`` accumulates every scrape date,
    while ``urls`` holds the current set of product urls rebuilt from the
    ``product_mstator`` collection.

    Args:
        db: pymongo database handle.
        date (str): date of the current scrape run.
    """
    # count_documents() replaces the Cursor.count() API removed in PyMongo 4.x.
    if db.meta.count_documents({'name': "dates"}) == 0:
        db.meta.insert_one({'name': "dates", 'dates': [date]})
    else:
        db.meta.update_one({'name': "dates"}, {"$push": {'dates': date}})
    urls = [e['url'] for e in db.product_mstator.find()]
    if db.meta.count_documents({'name': "urls"}) == 0:
        db.meta.insert_one({'name': "urls", 'urls': urls})
    else:
        # $set (not $push): the url list is a snapshot, not a history.
        db.meta.update_one({'name': "urls"}, {"$set": {'urls': urls}})
def get_dates(db):
    """Return every scrape date recorded in the meta collection.

    Args:
        db: pymongo database handle.

    Returns:
        list: all recorded dates, in insertion order.
    """
    meta_doc = db.meta.find_one({'name': "dates"})
    return meta_doc['dates']
def get_urls(db):
    """Return the snapshot of known product urls from the meta collection.

    Args:
        db: pymongo database handle.

    Returns:
        list: all product urls currently recorded.
    """
    meta_doc = db.meta.find_one({'name': "urls"})
    return meta_doc['urls']
def connect_db(uri=None):
    """Open a connection to the MongoDB cluster and return its ``test`` db.

    Args:
        uri (str, optional): MongoDB connection string.  When omitted, the
            ``MONGODB_URI`` environment variable is used if set, falling
            back to the historical hard-coded cluster URI so existing
            callers (``connect_db()``) keep working unchanged.

    Returns:
        Database: the ``test`` database handle used by the rest of the app.
    """
    import os  # local import: keeps the module's import block untouched

    # SECURITY: admin credentials are embedded in the fallback URI below.
    # Prefer setting MONGODB_URI in the environment instead of editing code.
    if uri is None:
        uri = os.environ.get(
            "MONGODB_URI",
            "mongodb+srv://admin:admin@arttechtestcluster.d7bmu.mongodb.net/myFirstDatabase?retryWrites=true&w=majority",
        )
    client = MongoClient(uri)
    return client.test
"""
#тут подтягиваем в переменные данные
product_name='rere'
date='02.07.2021'
product_number=122
product_price=202
#ниже не трогать.оно работает
result = db.product_mstator.find_one({'product_name':product_name})
if result==None:
business = {
'product_name': product_name,
'date': [],
'product_number': [],
'product_price': []
}
db.product_mstator.insert_one(business)
db.product_mstator.update_one({'product_name':product_name }, {'$push': {'date' :date}})
db.product_mstator.update_one({'product_name':product_name }, {'$push': {'product_number':product_number}})
db.product_mstator.update_one({'product_name':product_name }, {'$push': {'product_price':product_price}})
"""
| {"/main.py": ["/csvHandler.py", "/scraper.py", "/bd.py"]} |
62,347 | Wopros46527351/project_now | refs/heads/main | /main.py | import csvHandler
import scraper
import bd
import datetime
# Script entry point: scrape every url listed in target.txt, record each
# observation in MongoDB, then export the whole history to result.csv.
f = open('target.txt', 'r')  # NOTE(review): handle is never closed — consider `with`
db = bd.connect_db()
# One timestamp for the whole run, so every product shares the same date key.
date = str(datetime.datetime.now().strftime("%d-%m-%Y %H:%M:%S"))
while True:
    data = f.readline()
    data = data.replace("\n","")
    if data == "":
        # readline() returns "" at EOF; a blank line mid-file would also stop
        # the loop — assumes target.txt has no interior blank lines, TODO confirm.
        break
    else:
        print(data)
        price,count,name = scraper.get_info(data)
        # call the function (store this observation in the database)
        bd.make_push(db, name, date, count, price, data)
        print(f"name:{name}\nprice:{price}\ncount:{count}\nPress ENTER")
bd.push_meta(db, date)
dates = bd.get_dates(db)
urls = bd.get_urls(db)
print(dates, urls)
file = csvHandler.create_csv(dates)
for url in urls:
    result = bd.make_pull(db, url)
    # One product per call: writes its Price row and its Count row.
    csvHandler.write_data(file, result["product_name"], result["product_price"], result["product_number"],dates,result['date'])
#csvHandler.write_data(file,name,price,count)
file.close()
def create_csv(dates):
    """Create ``result.csv`` and write the header row.

    Header layout is ``Name,Price/Count,<date1>,<date2>,...,\\n`` — note the
    trailing comma before the newline, which matches how the data rows are
    written by ``write_data``.

    Args:
        dates (list): every date column to appear in the file.

    Returns:
        file: the open, writable csv file handle (caller must close it).
    """
    # The original used a for/else where the `else` always ran (no `break`
    # in the loop) — equivalent to a plain write after the loop.  Building
    # the row with join keeps the byte-identical output in one write.
    file = open("result.csv", "w", encoding='utf-8')
    header_cells = ["Name", "Price/Count"] + list(dates)
    file.write(",".join(header_cells) + ",\n")
    return file
def write_data(file, name, prices, counts, dates_all, dates_product):
    """Write one product's two csv rows (Price row, then Count row).

    Args:
        file (file): open csv file (as returned by ``create_csv``).
        name (str): product name; commas are replaced with dots so the
            name stays inside a single csv cell.
        prices (list): price per observation, parallel to *dates_product*.
        counts (list): count per observation, parallel to *dates_product*.
        dates_all (list): every date column in the csv header.
        dates_product (list): the dates this product was actually observed;
            assumed to be an ordered subset of *dates_all* — TODO confirm.

    NOTE(review): dates absent from *dates_product* emit no cell at all
    (not an empty ``,``), so value columns shift left when a product skips
    a date — verify this is intended.  Also, once every product date is
    consumed, ``dates_product[index]`` raises IndexError if *dates_all*
    still has further dates — presumably every product is observed on the
    final scrape date; verify against main.py's single-run timestamp.
    """
    name = name.replace(',', '.')
    file.write(f"{name},")
    file.write(f"Price,")
    index =0
    for date in dates_all:
        if dates_product[index] == date:
            file.write(f"{prices[index]},")
            index+=1
    else:
        # for/else: runs once after the loop completes (no `break` used),
        # terminating the Price row.  Mirrors the pattern in create_csv.
        file.write("\n")
    file.write(f",")  # empty Name cell for the second row
    file.write(f"Count,")
    index =0
    for date in dates_all:
        if dates_product[index] == date:
            file.write(f"{counts[index]},")
            index+=1
    else:
        # Terminate the Count row.
        file.write("\n")
if __name__ == "__main__":
    # Guard: this module only provides csv helpers for main.py; warn when
    # launched directly and wait so the message stays visible.
    print("This file is not supposed to be launched by user.Press ENTER to exit")
    input()
62,349 | theduderog/Superlance | refs/heads/master | /superlance/tests/crashmailbatch_test.py | import unittest
import mock
import time
from StringIO import StringIO
class CrashMailBatchTests(unittest.TestCase):
    """Unit tests for ``superlance.crashmailbatch.CrashMailBatch``.

    NOTE(review): Python-2-era code — relies on the ``StringIO`` module and
    the deprecated ``assertEquals`` alias; would need porting for Python 3.
    """

    # Fixture constants shared by every test.
    fromEmail = 'testFrom@blah.com'
    toEmail = 'testTo@blah.com'
    subject = 'Test Alert'
    now = 1279677400.1  # frozen "current time" injected into the plugin
    # Expected crash line; the backslash continues the string literal
    # across source lines (timestamp presumably corresponds to ``now`` —
    # TODO confirm against CrashMailBatch's formatting).
    unexpectedErrorMsg = '2010-07-20 18:56:40,099 -- Process bar:foo \
(pid 58597) died unexpectedly'

    def _getTargetClass(self):
        # Imported lazily so the module can be collected even when
        # superlance is not importable at module-import time.
        from superlance.crashmailbatch import CrashMailBatch
        return CrashMailBatch

    def _makeOneMocked(self, **kwargs):
        """Build a CrashMailBatch wired to in-memory streams and a mocked mailer."""
        kwargs['stdin'] = StringIO()
        kwargs['stdout'] = StringIO()
        kwargs['stderr'] = StringIO()
        kwargs['fromEmail'] = kwargs.get('fromEmail', self.fromEmail)
        kwargs['toEmail'] = kwargs.get('toEmail', self.toEmail)
        kwargs['subject'] = kwargs.get('subject', self.subject)
        kwargs['now'] = self.now
        obj = self._getTargetClass()(**kwargs)
        obj.sendEmail = mock.Mock()  # never send real mail from a test
        return obj

    def getProcessExitedEvent(self, pname, gname, expected):
        """Fabricate a supervisor PROCESS_STATE_EXITED (headers, payload) pair.

        ``expected`` is 1 for a normal exit, 0 for an unexpected crash.
        """
        headers = {
            'ver': '3.0', 'poolserial': '7', 'len': '71',
            'server': 'supervisor', 'eventname': 'PROCESS_STATE_EXITED',
            'serial': '7', 'pool': 'checkmailbatch',
        }
        payload = 'processname:%s groupname:%s from_state:RUNNING expected:%d \
pid:58597' % (pname, gname, expected)
        return (headers, payload)

    def test_getProcessStateChangeMsg_expected(self):
        # An expected exit must not produce a message.
        crash = self._makeOneMocked()
        hdrs, payload = self.getProcessExitedEvent('foo', 'bar', 1)
        self.assertEquals(None, crash.getProcessStateChangeMsg(hdrs, payload))

    def test_getProcessStateChangeMsg_unexpected(self):
        # An unexpected exit yields the formatted crash line.
        crash = self._makeOneMocked()
        hdrs, payload = self.getProcessExitedEvent('foo', 'bar', 0)
        msg = crash.getProcessStateChangeMsg(hdrs, payload)
        self.assertEquals(self.unexpectedErrorMsg, msg)

    def test_handleEvent_exit_expected(self):
        # Expected exits are ignored: no batched message, nothing on stderr.
        crash = self._makeOneMocked()
        hdrs, payload = self.getProcessExitedEvent('foo', 'bar', 1)
        crash.handleEvent(hdrs, payload)
        self.assertEquals([], crash.getBatchMsgs())
        self.assertEquals('', crash.stderr.getvalue())

    def test_handleEvent_exit_unexpected(self):
        # Unexpected exits are batched for mailing and logged to stderr.
        crash = self._makeOneMocked()
        hdrs, payload = self.getProcessExitedEvent('foo', 'bar', 0)
        crash.handleEvent(hdrs, payload)
        self.assertEquals([self.unexpectedErrorMsg], crash.getBatchMsgs())
        self.assertEquals('%s\n' % self.unexpectedErrorMsg, crash.stderr.getvalue())
if __name__ == '__main__':
    # Allow running this test module directly with the stdlib runner.
    unittest.main()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.