code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
"""This file sets up the package"""
from setuptools import setup
setup(
name='wotw-cookiecutter-base',
version='0.2.0',
packages=[],
)
|
[
"setuptools.setup"
] |
[((67, 133), 'setuptools.setup', 'setup', ([], {'name': '"""wotw-cookiecutter-base"""', 'version': '"""0.2.0"""', 'packages': '[]'}), "(name='wotw-cookiecutter-base', version='0.2.0', packages=[])\n", (72, 133), False, 'from setuptools import setup\n')]
|
import yaml
from collections import UserDict
from rebuild_tool.exceptions import IncompleteMetadataException, UnknownPluginException
from rebuild_tool.builder_plugins.builder_loader import available_builder_plugins
from rebuild_tool.pkg_source_plugins.pkg_source_loader import available_pkg_source_plugins
def get_file_data(input_file, split=False):
    """Open the given file and return its contents.

    Args:
        input_file: Path of the file to read.
        split: When True, return the content as a list of lines
            (line terminators stripped); otherwise return one string.

    Returns:
        str or list[str]: The file contents.

    Raises:
        IOError: If the file cannot be opened or read.
    """
    with open(input_file, 'r') as fi:
        data = fi.read()
    # splitlines() drops the terminators, matching the historical output.
    return data.splitlines() if split else data
class RebuildMetadata(UserDict):
    """Load, check and store all rebuild metadata.

    The metadata is parsed from a YAML document and validated: mandatory
    attributes must be present and the referenced builder / package-source
    plugins must actually be available.

    Raises:
        IncompleteMetadataException: If a mandatory attribute is missing.
        UnknownPluginException: If a referenced plugin is not available.
    """

    def __init__(self, yaml_data):
        # Zero-argument super(): super(self.__class__, self) recurses
        # infinitely if this class is ever subclassed.
        super().__init__()
        # safe_load: plain yaml.load() without an explicit Loader is
        # deprecated and can construct arbitrary Python objects.
        self.data = yaml.safe_load(yaml_data)
        for attr in ['build_system', 'packages_source', 'repo', 'packages']:
            if attr not in self:
                raise IncompleteMetadataException("Missing Rebuild file attribute: {}.".format(attr))
        if self['build_system'] not in available_builder_plugins:
            raise UnknownPluginException("Builder plugin: {} specified in Rebuild file not available.".format(
                self['build_system']))
        if self['packages_source'] not in available_pkg_source_plugins:
            raise UnknownPluginException("Packages source plugin: {} specified in Rebuild file not available.".format(
                self['packages_source']))
        # A metapackage, when present, is rebuilt like any other package.
        if 'metapackage' in self:
            self['packages'].append(self['metapackage'])
        if 'prefix' not in self:
            self['prefix'] = ""
        # Normalise scalar values to single-element lists.
        for attr in ["chroots", "recipes", "chroot_pkgs", "packages"]:
            if attr in self and not isinstance(self[attr], list):
                self[attr] = [self[attr]]
        # koji_tag is only meaningful (and then mandatory) for the koji source.
        if self['packages_source'] == 'koji':
            if 'koji_tag' not in self:
                raise IncompleteMetadataException("Missing Rebuild file attribute: koji_tag necesary to get srpms from koji.")
        else:
            self['koji_tag'] = None
class Recipe(yaml.YAMLObject):
    """Store the build order of a recipe.

    Reads data from a yml file in the format:
    - ['package1', 'bootstrap 0']
    - ['package2']
    - ['package1', 'bootstrap 1']
    ...
    """

    def __init__(self, recipe_file):
        self.packages = set()
        # Assigning through the property parses the YAML immediately.
        self.order = get_file_data(recipe_file)
        self.get_packages()

    @property
    def order(self):
        return self.__order

    @order.setter
    def order(self, recipe_data):
        # safe_load: yaml.load() without an explicit Loader is deprecated
        # and unsafe on untrusted input.
        self.__order = yaml.safe_load(recipe_data)

    def get_packages(self):
        """Fill the packages set with all package names present in the recipe."""
        if not hasattr(self, 'order'):
            return
        for item in self.order:
            # The first element of each recipe entry is the package name.
            self.packages.add(item[0])
|
[
"yaml.load",
"rebuild_tool.exceptions.IncompleteMetadataException"
] |
[((821, 841), 'yaml.load', 'yaml.load', (['yaml_data'], {}), '(yaml_data)\n', (830, 841), False, 'import yaml\n'), ((2681, 2703), 'yaml.load', 'yaml.load', (['recipe_data'], {}), '(recipe_data)\n', (2690, 2703), False, 'import yaml\n'), ((1998, 2112), 'rebuild_tool.exceptions.IncompleteMetadataException', 'IncompleteMetadataException', (['"""Missing Rebuild file attribute: koji_tag necesary to get srpms from koji."""'], {}), "(\n 'Missing Rebuild file attribute: koji_tag necesary to get srpms from koji.'\n )\n", (2025, 2112), False, 'from rebuild_tool.exceptions import IncompleteMetadataException, UnknownPluginException\n')]
|
# -*- coding: utf-8 -*-
import os, sys
import shutil
import tissueloc as tl
from tissueloc.load_slide import load_slide_img, select_slide_level
import numpy as np
from skimage import io, color
import cv2
if __name__ == "__main__":
slide_dir = "../data/TestSlides/Malignant"
save_dir = "../data/TestSlides/MalignantTissue"
if os.path.exists(save_dir):
shutil.rmtree(save_dir)
os.makedirs(save_dir)
slide_list = [ele for ele in os.listdir(slide_dir) if "tiff" in ele]
for ind, ele in enumerate(slide_list):
slide_path = os.path.join(slide_dir, ele)
cnts, d_factor = tl.locate_tissue_cnts(slide_path, max_img_size=2048, smooth_sigma=13,
thresh_val=0.88,min_tissue_size=10000)
s_level, d_factor = select_slide_level(slide_path, max_size=2048)
slide_img = load_slide_img(slide_path, s_level)
slide_img = np.ascontiguousarray(slide_img, dtype=np.uint8)
cv2.drawContours(slide_img, cnts, -1, (0, 255, 0), 9)
io.imsave(os.path.join(save_dir, os.path.join(os.path.splitext(ele)[0]+'_cnt.png')), slide_img)
|
[
"os.makedirs",
"tissueloc.load_slide.load_slide_img",
"numpy.ascontiguousarray",
"os.path.exists",
"cv2.drawContours",
"os.path.splitext",
"tissueloc.load_slide.select_slide_level",
"shutil.rmtree",
"os.path.join",
"os.listdir",
"tissueloc.locate_tissue_cnts"
] |
[((342, 366), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (356, 366), False, 'import os, sys\n'), ((404, 425), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (415, 425), False, 'import os, sys\n'), ((376, 399), 'shutil.rmtree', 'shutil.rmtree', (['save_dir'], {}), '(save_dir)\n', (389, 399), False, 'import shutil\n'), ((564, 592), 'os.path.join', 'os.path.join', (['slide_dir', 'ele'], {}), '(slide_dir, ele)\n', (576, 592), False, 'import os, sys\n'), ((618, 731), 'tissueloc.locate_tissue_cnts', 'tl.locate_tissue_cnts', (['slide_path'], {'max_img_size': '(2048)', 'smooth_sigma': '(13)', 'thresh_val': '(0.88)', 'min_tissue_size': '(10000)'}), '(slide_path, max_img_size=2048, smooth_sigma=13,\n thresh_val=0.88, min_tissue_size=10000)\n', (639, 731), True, 'import tissueloc as tl\n'), ((802, 847), 'tissueloc.load_slide.select_slide_level', 'select_slide_level', (['slide_path'], {'max_size': '(2048)'}), '(slide_path, max_size=2048)\n', (820, 847), False, 'from tissueloc.load_slide import load_slide_img, select_slide_level\n'), ((868, 903), 'tissueloc.load_slide.load_slide_img', 'load_slide_img', (['slide_path', 's_level'], {}), '(slide_path, s_level)\n', (882, 903), False, 'from tissueloc.load_slide import load_slide_img, select_slide_level\n'), ((924, 971), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['slide_img'], {'dtype': 'np.uint8'}), '(slide_img, dtype=np.uint8)\n', (944, 971), True, 'import numpy as np\n'), ((980, 1033), 'cv2.drawContours', 'cv2.drawContours', (['slide_img', 'cnts', '(-1)', '(0, 255, 0)', '(9)'], {}), '(slide_img, cnts, -1, (0, 255, 0), 9)\n', (996, 1033), False, 'import cv2\n'), ((460, 481), 'os.listdir', 'os.listdir', (['slide_dir'], {}), '(slide_dir)\n', (470, 481), False, 'import os, sys\n'), ((1088, 1109), 'os.path.splitext', 'os.path.splitext', (['ele'], {}), '(ele)\n', (1104, 1109), False, 'import os, sys\n')]
|
from dataclasses import dataclass
from typing import List, Optional
from erica.erica_legacy.elster_xml.common.basic_xml_data_representation import ENutzdaten, construct_basic_xml_data_representation
from erica.erica_legacy.elster_xml.grundsteuer.elster_eigentuemer import EAngFeststellung, EPersonData, EEigentumsverh, \
EEmpfangsbevollmaechtigter
from erica.erica_legacy.elster_xml.grundsteuer.elster_gebaeude import EAngWohn
from erica.erica_legacy.request_processing.erica_input.v2.grundsteuer_input import GrundsteuerData
from erica.erica_legacy.request_processing.erica_input.v2.grundsteuer_input_eigentuemer import \
Eigentuemer as EigentuemerInput
"""
The content of the Grundsteuer Nutzdaten XML as its data prepresentation.
The classes are prefixed with "E" for "Elster".
"""
@dataclass
class EGW1:
    """GW1 part of the declaration: owner ("Eigentuemer") related data."""
    Ang_Feststellung: EAngFeststellung
    Eigentuemer: List[EPersonData]
    Eigentumsverh: EEigentumsverh
    Empfangsv: Optional[EEmpfangsbevollmaechtigter]

    def __init__(self, input_data: EigentuemerInput):
        self.Ang_Feststellung = EAngFeststellung()
        # One EPersonData entry per owner, keyed by list position.
        self.Eigentuemer = [
            EPersonData(person, idx)
            for idx, person in enumerate(input_data.person)
        ]
        self.Eigentumsverh = EEigentumsverh(input_data)
        # Only set an authorised recipient when one is present and truthy.
        empfangsbevollmaechtigter = getattr(
            input_data, "empfangsbevollmaechtigter", None)
        if empfangsbevollmaechtigter:
            self.Empfangsv = EEmpfangsbevollmaechtigter(empfangsbevollmaechtigter)
        else:
            self.Empfangsv = None
@dataclass
class EGW2:
    """GW2 part of the declaration: building ("Gebaeude") related data."""
    Ang_Wohn: EAngWohn

    def __init__(self, input_data: GrundsteuerData):
        # Only the gebaeude section of the input is relevant here.
        self.Ang_Wohn = EAngWohn(input_data.gebaeude)
@dataclass
class ERueckuebermittlung:
    """Return-transmission flag of the Vorsatz."""
    Bescheid: str

    def __init__(self):
        # '2' means no "Bescheiddatenabholung".
        self.Bescheid = '2'
@dataclass
class EVorsatz:
    """The Vorsatz (preamble) of the Grundsteuer XML."""
    Unterfallart: str
    Vorgang: str
    StNr: str
    Zeitraum: str
    AbsName: str
    AbsStr: str
    AbsPlz: str
    AbsOrt: str
    Copyright: str
    OrdNrArt: str
    Rueckuebermittlung: ERueckuebermittlung

    def __init__(self, input_data: GrundsteuerData):
        self.Unterfallart = "88"  # Grundsteuer
        self.Vorgang = "01"  # Veranlagung
        # TODO
        self.StNr = "1121081508150"
        self.Zeitraum = "2022"  # TODO require on input?
        # The sender address fields are taken from the first owner.
        first_person = input_data.eigentuemer.person[0]
        angaben = first_person.persoenlicheAngaben
        self.AbsName = angaben.vorname + " " + angaben.name
        self.AbsStr = first_person.adresse.strasse
        self.AbsPlz = first_person.adresse.plz
        self.AbsOrt = first_person.adresse.ort
        self.Copyright = "(C) 2022 DigitalService4Germany"
        # TODO Steuernummer or Aktenzeichen?
        self.OrdNrArt = "S"
        self.Rueckuebermittlung = ERueckuebermittlung()
@dataclass
class EGrundsteuerSpecifics:
    """The E88 (Grundsteuer) specific payload of the Nutzdaten block."""
    GW1: EGW1
    GW2: EGW2
    Vorsatz: EVorsatz
    xml_attr_version: str
    xml_attr_xmlns: str

    def __init__(self, input_data: GrundsteuerData):
        self.GW1 = EGW1(input_data.eigentuemer)
        self.GW2 = EGW2(input_data)
        self.Vorsatz = EVorsatz(input_data)
        # Fixed attributes of the <E88> element.
        self.xml_attr_version = "2"
        self.xml_attr_xmlns = (
            "http://finkonsens.de/elster/elstererklaerung/grundsteuerwert/e88/v2"
        )
@dataclass
class EGrundsteuerData(ENutzdaten):
    """Nutzdaten wrapper that holds the E88 Grundsteuer payload."""
    E88: EGrundsteuerSpecifics

    def __init__(self, input_data: GrundsteuerData):
        self.E88 = EGrundsteuerSpecifics(input_data)
def get_full_grundsteuer_data_representation(input_data: GrundsteuerData):
    """ Returns the full data representation of an elster XML for the Grundsteuer use case. """
    nutzdaten = EGrundsteuerData(input_data)
    # TODO set BuFa correctly
    return construct_basic_xml_data_representation(
        empfaenger_id='F',
        empfaenger_text="1121",
        nutzdaten_object=nutzdaten,
        nutzdaten_header_version="11")
|
[
"erica.erica_legacy.elster_xml.grundsteuer.elster_eigentuemer.EAngFeststellung",
"erica.erica_legacy.elster_xml.grundsteuer.elster_eigentuemer.EEigentumsverh",
"erica.erica_legacy.elster_xml.grundsteuer.elster_gebaeude.EAngWohn",
"erica.erica_legacy.elster_xml.grundsteuer.elster_eigentuemer.EEmpfangsbevollmaechtigter",
"erica.erica_legacy.elster_xml.common.basic_xml_data_representation.construct_basic_xml_data_representation",
"erica.erica_legacy.elster_xml.grundsteuer.elster_eigentuemer.EPersonData"
] |
[((3915, 4094), 'erica.erica_legacy.elster_xml.common.basic_xml_data_representation.construct_basic_xml_data_representation', 'construct_basic_xml_data_representation', ([], {'empfaenger_id': '"""F"""', 'empfaenger_text': '"""1121"""', 'nutzdaten_object': 'grundsteuer_elster_data_representation', 'nutzdaten_header_version': '"""11"""'}), "(empfaenger_id='F', empfaenger_text=\n '1121', nutzdaten_object=grundsteuer_elster_data_representation,\n nutzdaten_header_version='11')\n", (3954, 4094), False, 'from erica.erica_legacy.elster_xml.common.basic_xml_data_representation import ENutzdaten, construct_basic_xml_data_representation\n'), ((1075, 1093), 'erica.erica_legacy.elster_xml.grundsteuer.elster_eigentuemer.EAngFeststellung', 'EAngFeststellung', ([], {}), '()\n', (1091, 1093), False, 'from erica.erica_legacy.elster_xml.grundsteuer.elster_eigentuemer import EAngFeststellung, EPersonData, EEigentumsverh, EEmpfangsbevollmaechtigter\n'), ((1344, 1370), 'erica.erica_legacy.elster_xml.grundsteuer.elster_eigentuemer.EEigentumsverh', 'EEigentumsverh', (['input_data'], {}), '(input_data)\n', (1358, 1370), False, 'from erica.erica_legacy.elster_xml.grundsteuer.elster_eigentuemer import EAngFeststellung, EPersonData, EEigentumsverh, EEmpfangsbevollmaechtigter\n'), ((1742, 1771), 'erica.erica_legacy.elster_xml.grundsteuer.elster_gebaeude.EAngWohn', 'EAngWohn', (['input_data.gebaeude'], {}), '(input_data.gebaeude)\n', (1750, 1771), False, 'from erica.erica_legacy.elster_xml.grundsteuer.elster_gebaeude import EAngWohn\n'), ((1224, 1261), 'erica.erica_legacy.elster_xml.grundsteuer.elster_eigentuemer.EPersonData', 'EPersonData', (['input_eigentuemer', 'index'], {}), '(input_eigentuemer, index)\n', (1235, 1261), False, 'from erica.erica_legacy.elster_xml.grundsteuer.elster_eigentuemer import EAngFeststellung, EPersonData, EEigentumsverh, EEmpfangsbevollmaechtigter\n'), ((1503, 1567), 'erica.erica_legacy.elster_xml.grundsteuer.elster_eigentuemer.EEmpfangsbevollmaechtigter', 
'EEmpfangsbevollmaechtigter', (['input_data.empfangsbevollmaechtigter'], {}), '(input_data.empfangsbevollmaechtigter)\n', (1529, 1567), False, 'from erica.erica_legacy.elster_xml.grundsteuer.elster_eigentuemer import EAngFeststellung, EPersonData, EEigentumsverh, EEmpfangsbevollmaechtigter\n')]
|
#! /usr/bin/env python
"""Run a YOLO_v2 style detection model on test images."""
import argparse
import colorsys
import imghdr
import os
import random
import numpy as np
from keras import backend as K
from keras.models import load_model
from PIL import Image, ImageDraw, ImageFont
from yad2k.models.keras_yolo import yolo_eval, yolo_head
import shutil
def _main(session, args_model_path, args_anchors_path, args_classes_path, args_test_path, args_output_path):
    """Run YOLO_v2 detection over every image in ``args_test_path``.

    For each image, the highest-scoring 'dog' detection is cropped (with a
    10 px border) and saved to ``args_output_path``; images whose detections
    contain a non-dog class fall through to a plain file copy.

    Args:
        session: Existing TensorFlow session the graph is evaluated in.
        args_model_path: Path to the Keras model (.h5).
        args_anchors_path: Text file with comma-separated anchor values.
        args_classes_path: Text file with one class name per line.
        args_test_path: Directory of input images.
        args_output_path: Directory results are written to.
    """
    model_path = args_model_path
    assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
    anchors_path = args_anchors_path
    classes_path = args_classes_path
    test_path = args_test_path
    output_path = args_output_path
    # Fixed detection thresholds (not exposed as parameters).
    args_score_threshold = .3
    args_iou_threshold = .5
    if not os.path.exists(output_path):
        print('Creating output path {}'.format(output_path))
        os.mkdir(output_path)
    # sess = K.get_session()  # TODO: Remove dependence on Tensorflow session.
    sess = session
    with open(classes_path) as f:
        class_names = f.readlines()
    class_names = [c.strip() for c in class_names]
    with open(anchors_path) as f:
        anchors = f.readline()
        anchors = [float(x) for x in anchors.split(',')]
        anchors = np.array(anchors).reshape(-1, 2)
    yolo_model = load_model(model_path)
    # Verify model, anchors, and classes are compatible
    num_classes = len(class_names)
    num_anchors = len(anchors)
    # TODO: Assumes dim ordering is channel last
    model_output_channels = yolo_model.layers[-1].output_shape[-1]
    assert model_output_channels == num_anchors * (num_classes + 5), \
        'Mismatch between model and given anchor and class sizes. ' \
        'Specify matching anchors and classes with --anchors_path and ' \
        '--classes_path flags.'
    print('{} model, anchors, and classes loaded.'.format(model_path))
    # Check if model is fully convolutional, assuming channel last order.
    model_image_size = yolo_model.layers[0].input_shape[1:3]
    is_fixed_size = model_image_size != (None, None)
    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(class_names), 1., 1.)
                  for x in range(len(class_names))]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            colors))
    random.seed(10101)  # Fixed seed for consistent colors across runs.
    random.shuffle(colors)  # Shuffle colors to decorrelate adjacent classes.
    random.seed(None)  # Reset seed to default.
    # Generate output tensor targets for filtered bounding boxes.
    # TODO: Wrap these backend operations with Keras layers.
    yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
    input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(
        yolo_outputs,
        input_image_shape,
        score_threshold=args_score_threshold,
        iou_threshold=args_iou_threshold)
    for image_file in os.listdir(test_path):
        # try:
        #     image_type = imghdr.what(os.path.join(test_path, image_file))
        #     if not image_type:
        #         continue
        # except IsADirectoryError:
        #     continue
        image = Image.open(os.path.join(test_path, image_file))
        if is_fixed_size:  # TODO: When resizing we can use minibatch input.
            resized_image = image.resize(
                tuple(reversed(model_image_size)), Image.BICUBIC)
            image_data = np.array(resized_image, dtype='float32')
        else:
            # Due to skip connection + max pooling in YOLO_v2, inputs must have
            # width and height as multiples of 32.
            new_image_size = (image.width - (image.width % 32),
                              image.height - (image.height % 32))
            resized_image = image.resize(new_image_size, Image.BICUBIC)
            image_data = np.array(resized_image, dtype='float32')
        print(image_data.shape)
        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.
        out_boxes, out_scores, out_classes = sess.run(
            [boxes, scores, classes],
            feed_dict={
                yolo_model.input: image_data,
                input_image_shape: [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })
        print('Found {} boxes for {}'.format(len(out_boxes), image_file))
        # font/thickness are only used by the commented-out drawing code below.
        font = ImageFont.truetype(
            font='font/FiraMono-Medium.otf',
            size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
        thickness = (image.size[0] + image.size[1]) // 300
        max_score = 0
        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = class_names[c]
            box = out_boxes[i]
            score = out_scores[i]
            label = '{} {:.2f}'.format(predicted_class, score)
            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font)
            # Clamp the box to the image bounds.
            top, left, bottom, right = box
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
            print(label, (left, top), (right, bottom))
            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])
            # # My kingdom for a good redistributable image drawing library.
            # for i in range(thickness):
            #     draw.rectangle(
            #         [left + i, top + i, right - i, bottom - i],
            #         outline=colors[c])
            # draw.rectangle(
            #     [tuple(text_origin), tuple(text_origin + label_size)],
            #     fill=colors[c])
            # draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            # del draw
            if predicted_class == 'dog':
                # Keep only the crop of the best-scoring dog seen so far.
                if score > max_score:
                    if max_score > 0:
                        print('-' * 10)
                    border = 10
                    max_score = score
                    crop_box = left - border, top - border, right + border, bottom + border
                    cropped_img = image.crop(crop_box)
                    cropped_img.save(os.path.join(output_path, image_file), quality=90)
            else:
                # Non-dog detection: pass the original file through unchanged.
                shutil.copyfile(os.path.join(test_path, image_file), os.path.join(output_path, image_file))
        # image.save(os.path.join(output_path, image_file), quality=90)
def _main_input():
    """Detect dogs in the project input folders and write crops.

    Walks data_train/data_val/data_test under ``intput_path``, runs YOLO_v2
    on every image of every class folder and saves the best dog crop (or a
    plain copy of the image when no dog / no object is detected) into a
    mirrored ``yolo_<folder>`` tree. Prints the running counts after each
    data folder.
    """
    model_path = 'model_data/yolo.h5'
    anchors_path = 'model_data/yolo_anchors.txt'
    classes_path = 'model_data/pascal_classes.txt'
    # model_path = args_model_path
    assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
    # anchors_path = args_anchors_path
    # classes_path = args_classes_path
    # test_path = args_test_path
    # output_path = args_output_path
    intput_path = 'D:/Udacity/MachineLearning(Advanced)/p6_graduation_project/input'
    data_folders = ['data_train', 'data_val', 'data_test']
    args_score_threshold = .3
    args_iou_threshold = .5
    # Running statistics over all processed images.
    count_max_dog = 0
    count_no_dog = 0
    count_no_object = 0
    # if not os.path.exists(output_path):
    #     print('Creating output path {}'.format(output_path))
    #     os.mkdir(output_path)
    sess = K.get_session()  # TODO: Remove dependence on Tensorflow session.
    # sess = session
    with open(classes_path) as f:
        class_names = f.readlines()
    class_names = [c.strip() for c in class_names]
    with open(anchors_path) as f:
        anchors = f.readline()
        anchors = [float(x) for x in anchors.split(',')]
        anchors = np.array(anchors).reshape(-1, 2)
    yolo_model = load_model(model_path)
    # Verify model, anchors, and classes are compatible
    num_classes = len(class_names)
    num_anchors = len(anchors)
    # TODO: Assumes dim ordering is channel last
    model_output_channels = yolo_model.layers[-1].output_shape[-1]
    assert model_output_channels == num_anchors * (num_classes + 5), \
        'Mismatch between model and given anchor and class sizes. ' \
        'Specify matching anchors and classes with --anchors_path and ' \
        '--classes_path flags.'
    print('{} model, anchors, and classes loaded.'.format(model_path))
    # Check if model is fully convolutional, assuming channel last order.
    model_image_size = yolo_model.layers[0].input_shape[1:3]
    is_fixed_size = model_image_size != (None, None)
    # Generate colors for drawing bounding boxes.
    hsv_tuples = [(x / len(class_names), 1., 1.)
                  for x in range(len(class_names))]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            colors))
    random.seed(10101)  # Fixed seed for consistent colors across runs.
    random.shuffle(colors)  # Shuffle colors to decorrelate adjacent classes.
    random.seed(None)  # Reset seed to default.
    # Generate output tensor targets for filtered bounding boxes.
    # TODO: Wrap these backend operations with Keras layers.
    yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
    input_image_shape = K.placeholder(shape=(2, ))
    boxes, scores, classes = yolo_eval(
        yolo_outputs,
        input_image_shape,
        score_threshold=args_score_threshold,
        iou_threshold=args_iou_threshold)
    for data_folder_name in data_folders:
        data_folder = os.path.join(intput_path, data_folder_name)
        output_folder = os.path.join(intput_path, 'yolo_' + data_folder_name)
        if not os.path.exists(output_folder):
            print('Create folders: %s' % output_folder)
            os.makedirs(output_folder)
        else:
            print('Folder exists: %s' % output_folder)
        # One sub-folder per class; mirror the structure in the output tree.
        for class_folder_name in os.listdir(data_folder):
            test_path = os.path.join(data_folder, class_folder_name)
            output_path = os.path.join(output_folder, class_folder_name)
            if not os.path.exists(output_path):
                print('Create folders: %s' % output_path)
                os.makedirs(output_path)
            else:
                print('Folder exists: %s' % output_path)
            for image_file in os.listdir(test_path):
                # try:
                #     image_type = imghdr.what(os.path.join(test_path, image_file))
                #     if not image_type:
                #         continue
                # except IsADirectoryError:
                #     continue
                image = Image.open(os.path.join(test_path, image_file))
                if is_fixed_size:  # TODO: When resizing we can use minibatch input.
                    resized_image = image.resize(
                        tuple(reversed(model_image_size)), Image.BICUBIC)
                    image_data = np.array(resized_image, dtype='float32')
                else:
                    # Due to skip connection + max pooling in YOLO_v2, inputs must have
                    # width and height as multiples of 32.
                    new_image_size = (image.width - (image.width % 32),
                                      image.height - (image.height % 32))
                    resized_image = image.resize(new_image_size, Image.BICUBIC)
                    image_data = np.array(resized_image, dtype='float32')
                print(image_data.shape)
                image_data /= 255.
                image_data = np.expand_dims(image_data, 0)  # Add batch dimension.
                try:
                    out_boxes, out_scores, out_classes = sess.run(
                        [boxes, scores, classes],
                        feed_dict={
                            yolo_model.input: image_data,
                            input_image_shape: [image.size[1], image.size[0]],
                            K.learning_phase(): 0
                        })
                except Exception as ex:
                    # Keep going on bad images: copy the original through.
                    print('Err: %s' % image_file)
                    print(ex)
                    shutil.copyfile(os.path.join(test_path, image_file), os.path.join(output_path, image_file))
                    continue
                # print('Found {} boxes for {}'.format(len(out_boxes), image_file))
                # font/thickness are only used by the commented-out drawing code below.
                font = ImageFont.truetype(
                    font='font/FiraMono-Medium.otf',
                    size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
                thickness = (image.size[0] + image.size[1]) // 300
                max_score = 0
                if len(out_classes) > 0:
                    for i, c in reversed(list(enumerate(out_classes))):
                        predicted_class = class_names[c]
                        box = out_boxes[i]
                        score = out_scores[i]
                        label = '{} {:.2f}'.format(predicted_class, score)
                        draw = ImageDraw.Draw(image)
                        label_size = draw.textsize(label, font)
                        # Clamp the box to the image bounds.
                        top, left, bottom, right = box
                        top = max(0, np.floor(top + 0.5).astype('int32'))
                        left = max(0, np.floor(left + 0.5).astype('int32'))
                        bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
                        right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
                        # print(label, (left, top), (right, bottom))
                        if top - label_size[1] >= 0:
                            text_origin = np.array([left, top - label_size[1]])
                        else:
                            text_origin = np.array([left, top + 1])
                        # # My kingdom for a good redistributable image drawing library.
                        # for i in range(thickness):
                        #     draw.rectangle(
                        #         [left + i, top + i, right - i, bottom - i],
                        #         outline=colors[c])
                        # draw.rectangle(
                        #     [tuple(text_origin), tuple(text_origin + label_size)],
                        #     fill=colors[c])
                        # draw.text(text_origin, label, fill=(0, 0, 0), font=font)
                        # del draw
                        if predicted_class == 'dog':
                            # Keep only the crop of the best-scoring dog so far.
                            if score > max_score:
                                if max_score > 0:
                                    print('+' * 10)
                                count_max_dog += 1
                                border = 10
                                max_score = score
                                crop_box = left - border, top - border, right + border, bottom + border
                                cropped_img = image.crop(crop_box)
                                cropped_img.save(os.path.join(output_path, image_file), quality=90)
                        else:
                            count_no_dog += 1
                            print('-' * 10)
                            shutil.copyfile(os.path.join(test_path, image_file), os.path.join(output_path, image_file))
                else:
                    # Nothing detected at all: pass the file through unchanged.
                    count_no_object += 1
                    print('*' * 10)
                    shutil.copyfile(os.path.join(test_path, image_file), os.path.join(output_path, image_file))
        print('%s %s %s' %(count_max_dog, count_no_dog, count_no_object))
        # image.save(os.path.join(output_path, image_file), quality=90)
if __name__ == '__main__':
    # sess = K.get_session()  # TODO: Remove dependence on Tensorflow session.
    # Test with the images bundled with YOLO.
    model_path = 'model_data/yolo.h5'
    anchors_path = 'model_data/yolo_anchors.txt'
    classes_path = 'model_data/pascal_classes.txt'
    # test_path = 'images'
    # output_path = 'images/out'
    # _main(model_path, anchors_path, classes_path, test_path, output_path)
    # Process the project input data.
    _main_input()
    # # Process data_train
    # test_path = 'D:/Udacity/MachineLearning(Advanced)/p6_graduation_project/input/data_train'
    # output_path = 'D:/Udacity/MachineLearning(Advanced)/p6_graduation_project/input/yolo_data_train'
    # for folder_name in os.listdir(test_path):
    #     in_path = os.path.join(test_path, folder_name)
    #     out_path = os.path.join(output_path, folder_name)
    #     if not os.path.exists(out_path):
    #         print('Create folder: %s' % out_path)
    #         os.makedirs(out_path)
    #     else:
    #         print('Folder exists: %s' % out_path)
    # #     _main(sess, model_path, anchors_path, classes_path, in_path, out_path)
    # # Process data_val
    # test_path = 'D:/Udacity/MachineLearning(Advanced)/p6_graduation_project/input/data_val'
    # output_path = 'D:/Udacity/MachineLearning(Advanced)/p6_graduation_project/input/yolo_data_val'
    # for folder_name in os.listdir(test_path):
    #     in_path = os.path.join(test_path, folder_name)
    #     out_path = os.path.join(output_path, folder_name)
    #     if not os.path.exists(out_path):
    #         print('Create folder: %s' % out_path)
    #         os.makedirs(out_path)
    #     else:
    #         print('Folder exists: %s' % out_path)
    # #     _main(sess, model_path, anchors_path, classes_path, in_path, out_path)
    # # Process data_test
    # test_path = 'D:/Udacity/MachineLearning(Advanced)/p6_graduation_project/input/data_test'
    # output_path = 'D:/Udacity/MachineLearning(Advanced)/p6_graduation_project/input/yolo_data_test'
    # for folder_name in os.listdir(test_path):
    #     in_path = os.path.join(test_path, folder_name)
    #     out_path = os.path.join(output_path, folder_name)
    #     if not os.path.exists(out_path):
    #         print('Create folder: %s' % out_path)
    #         os.makedirs(out_path)
    #     else:
    #         print('Folder exists: %s' % out_path)
    # #     _main(sess, model_path, anchors_path, classes_path, in_path, out_path)
    # sess.close()
|
[
"keras.models.load_model",
"keras.backend.placeholder",
"os.mkdir",
"os.makedirs",
"colorsys.hsv_to_rgb",
"keras.backend.learning_phase",
"keras.backend.get_session",
"random.shuffle",
"numpy.floor",
"os.path.exists",
"numpy.expand_dims",
"yad2k.models.keras_yolo.yolo_eval",
"random.seed",
"numpy.array",
"PIL.ImageDraw.Draw",
"os.path.join",
"os.listdir"
] |
[((1317, 1339), 'keras.models.load_model', 'load_model', (['model_path'], {}), '(model_path)\n', (1327, 1339), False, 'from keras.models import load_model\n'), ((2427, 2445), 'random.seed', 'random.seed', (['(10101)'], {}), '(10101)\n', (2438, 2445), False, 'import random\n'), ((2499, 2521), 'random.shuffle', 'random.shuffle', (['colors'], {}), '(colors)\n', (2513, 2521), False, 'import random\n'), ((2577, 2594), 'random.seed', 'random.seed', (['None'], {}), '(None)\n', (2588, 2594), False, 'import random\n'), ((2848, 2873), 'keras.backend.placeholder', 'K.placeholder', ([], {'shape': '(2,)'}), '(shape=(2,))\n', (2861, 2873), True, 'from keras import backend as K\n'), ((2904, 3023), 'yad2k.models.keras_yolo.yolo_eval', 'yolo_eval', (['yolo_outputs', 'input_image_shape'], {'score_threshold': 'args_score_threshold', 'iou_threshold': 'args_iou_threshold'}), '(yolo_outputs, input_image_shape, score_threshold=\n args_score_threshold, iou_threshold=args_iou_threshold)\n', (2913, 3023), False, 'from yad2k.models.keras_yolo import yolo_eval, yolo_head\n'), ((3075, 3096), 'os.listdir', 'os.listdir', (['test_path'], {}), '(test_path)\n', (3085, 3096), False, 'import os\n'), ((7662, 7677), 'keras.backend.get_session', 'K.get_session', ([], {}), '()\n', (7675, 7677), True, 'from keras import backend as K\n'), ((8063, 8085), 'keras.models.load_model', 'load_model', (['model_path'], {}), '(model_path)\n', (8073, 8085), False, 'from keras.models import load_model\n'), ((9173, 9191), 'random.seed', 'random.seed', (['(10101)'], {}), '(10101)\n', (9184, 9191), False, 'import random\n'), ((9245, 9267), 'random.shuffle', 'random.shuffle', (['colors'], {}), '(colors)\n', (9259, 9267), False, 'import random\n'), ((9323, 9340), 'random.seed', 'random.seed', (['None'], {}), '(None)\n', (9334, 9340), False, 'import random\n'), ((9594, 9619), 'keras.backend.placeholder', 'K.placeholder', ([], {'shape': '(2,)'}), '(shape=(2,))\n', (9607, 9619), True, 'from keras import backend as K\n'), 
((9650, 9769), 'yad2k.models.keras_yolo.yolo_eval', 'yolo_eval', (['yolo_outputs', 'input_image_shape'], {'score_threshold': 'args_score_threshold', 'iou_threshold': 'args_iou_threshold'}), '(yolo_outputs, input_image_shape, score_threshold=\n args_score_threshold, iou_threshold=args_iou_threshold)\n', (9659, 9769), False, 'from yad2k.models.keras_yolo import yolo_eval, yolo_head\n'), ((784, 811), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (798, 811), False, 'import os\n'), ((882, 903), 'os.mkdir', 'os.mkdir', (['output_path'], {}), '(output_path)\n', (890, 903), False, 'import os\n'), ((4122, 4151), 'numpy.expand_dims', 'np.expand_dims', (['image_data', '(0)'], {}), '(image_data, 0)\n', (4136, 4151), True, 'import numpy as np\n'), ((9868, 9911), 'os.path.join', 'os.path.join', (['intput_path', 'data_folder_name'], {}), '(intput_path, data_folder_name)\n', (9880, 9911), False, 'import os\n'), ((9936, 9989), 'os.path.join', 'os.path.join', (['intput_path', "('yolo_' + data_folder_name)"], {}), "(intput_path, 'yolo_' + data_folder_name)\n", (9948, 9989), False, 'import os\n'), ((10234, 10257), 'os.listdir', 'os.listdir', (['data_folder'], {}), '(data_folder)\n', (10244, 10257), False, 'import os\n'), ((3336, 3371), 'os.path.join', 'os.path.join', (['test_path', 'image_file'], {}), '(test_path, image_file)\n', (3348, 3371), False, 'import os\n'), ((3583, 3623), 'numpy.array', 'np.array', (['resized_image'], {'dtype': '"""float32"""'}), "(resized_image, dtype='float32')\n", (3591, 3623), True, 'import numpy as np\n'), ((3996, 4036), 'numpy.array', 'np.array', (['resized_image'], {'dtype': '"""float32"""'}), "(resized_image, dtype='float32')\n", (4004, 4036), True, 'import numpy as np\n'), ((5031, 5052), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (5045, 5052), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((10005, 10034), 'os.path.exists', 'os.path.exists', (['output_folder'], {}), 
'(output_folder)\n', (10019, 10034), False, 'import os\n'), ((10104, 10130), 'os.makedirs', 'os.makedirs', (['output_folder'], {}), '(output_folder)\n', (10115, 10130), False, 'import os\n'), ((10283, 10327), 'os.path.join', 'os.path.join', (['data_folder', 'class_folder_name'], {}), '(data_folder, class_folder_name)\n', (10295, 10327), False, 'import os\n'), ((10354, 10400), 'os.path.join', 'os.path.join', (['output_folder', 'class_folder_name'], {}), '(output_folder, class_folder_name)\n', (10366, 10400), False, 'import os\n'), ((10654, 10675), 'os.listdir', 'os.listdir', (['test_path'], {}), '(test_path)\n', (10664, 10675), False, 'import os\n'), ((1266, 1283), 'numpy.array', 'np.array', (['anchors'], {}), '(anchors)\n', (1274, 1283), True, 'import numpy as np\n'), ((2270, 2293), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['*x'], {}), '(*x)\n', (2289, 2293), False, 'import colorsys\n'), ((5560, 5597), 'numpy.array', 'np.array', (['[left, top - label_size[1]]'], {}), '([left, top - label_size[1]])\n', (5568, 5597), True, 'import numpy as np\n'), ((5646, 5671), 'numpy.array', 'np.array', (['[left, top + 1]'], {}), '([left, top + 1])\n', (5654, 5671), True, 'import numpy as np\n'), ((8012, 8029), 'numpy.array', 'np.array', (['anchors'], {}), '(anchors)\n', (8020, 8029), True, 'import numpy as np\n'), ((9016, 9039), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['*x'], {}), '(*x)\n', (9035, 9039), False, 'import colorsys\n'), ((10420, 10447), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (10434, 10447), False, 'import os\n'), ((10523, 10547), 'os.makedirs', 'os.makedirs', (['output_path'], {}), '(output_path)\n', (10534, 10547), False, 'import os\n'), ((11867, 11896), 'numpy.expand_dims', 'np.expand_dims', (['image_data', '(0)'], {}), '(image_data, 0)\n', (11881, 11896), True, 'import numpy as np\n'), ((4423, 4441), 'keras.backend.learning_phase', 'K.learning_phase', ([], {}), '()\n', (4439, 4441), True, 'from keras import 
backend as K\n'), ((6676, 6711), 'os.path.join', 'os.path.join', (['test_path', 'image_file'], {}), '(test_path, image_file)\n', (6688, 6711), False, 'import os\n'), ((6713, 6750), 'os.path.join', 'os.path.join', (['output_path', 'image_file'], {}), '(output_path, image_file)\n', (6725, 6750), False, 'import os\n'), ((10971, 11006), 'os.path.join', 'os.path.join', (['test_path', 'image_file'], {}), '(test_path, image_file)\n', (10983, 11006), False, 'import os\n'), ((11250, 11290), 'numpy.array', 'np.array', (['resized_image'], {'dtype': '"""float32"""'}), "(resized_image, dtype='float32')\n", (11258, 11290), True, 'import numpy as np\n'), ((11717, 11757), 'numpy.array', 'np.array', (['resized_image'], {'dtype': '"""float32"""'}), "(resized_image, dtype='float32')\n", (11725, 11757), True, 'import numpy as np\n'), ((4632, 4668), 'numpy.floor', 'np.floor', (['(0.03 * image.size[1] + 0.5)'], {}), '(0.03 * image.size[1] + 0.5)\n', (4640, 4668), True, 'import numpy as np\n'), ((5174, 5193), 'numpy.floor', 'np.floor', (['(top + 0.5)'], {}), '(top + 0.5)\n', (5182, 5193), True, 'import numpy as np\n'), ((5237, 5257), 'numpy.floor', 'np.floor', (['(left + 0.5)'], {}), '(left + 0.5)\n', (5245, 5257), True, 'import numpy as np\n'), ((5315, 5337), 'numpy.floor', 'np.floor', (['(bottom + 0.5)'], {}), '(bottom + 0.5)\n', (5323, 5337), True, 'import numpy as np\n'), ((5394, 5415), 'numpy.floor', 'np.floor', (['(right + 0.5)'], {}), '(right + 0.5)\n', (5402, 5415), True, 'import numpy as np\n'), ((6575, 6612), 'os.path.join', 'os.path.join', (['output_path', 'image_file'], {}), '(output_path, image_file)\n', (6587, 6612), False, 'import os\n'), ((13351, 13372), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (13365, 13372), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((15738, 15773), 'os.path.join', 'os.path.join', (['test_path', 'image_file'], {}), '(test_path, image_file)\n', (15750, 15773), False, 'import os\n'), ((15775, 15812), 
'os.path.join', 'os.path.join', (['output_path', 'image_file'], {}), '(output_path, image_file)\n', (15787, 15812), False, 'import os\n'), ((12466, 12501), 'os.path.join', 'os.path.join', (['test_path', 'image_file'], {}), '(test_path, image_file)\n', (12478, 12501), False, 'import os\n'), ((12503, 12540), 'os.path.join', 'os.path.join', (['output_path', 'image_file'], {}), '(output_path, image_file)\n', (12515, 12540), False, 'import os\n'), ((13990, 14027), 'numpy.array', 'np.array', (['[left, top - label_size[1]]'], {}), '([left, top - label_size[1]])\n', (13998, 14027), True, 'import numpy as np\n'), ((14100, 14125), 'numpy.array', 'np.array', (['[left, top + 1]'], {}), '([left, top + 1])\n', (14108, 14125), True, 'import numpy as np\n'), ((12261, 12279), 'keras.backend.learning_phase', 'K.learning_phase', ([], {}), '()\n', (12277, 12279), True, 'from keras import backend as K\n'), ((12799, 12835), 'numpy.floor', 'np.floor', (['(0.03 * image.size[1] + 0.5)'], {}), '(0.03 * image.size[1] + 0.5)\n', (12807, 12835), True, 'import numpy as np\n'), ((15527, 15562), 'os.path.join', 'os.path.join', (['test_path', 'image_file'], {}), '(test_path, image_file)\n', (15539, 15562), False, 'import os\n'), ((15564, 15601), 'os.path.join', 'os.path.join', (['output_path', 'image_file'], {}), '(output_path, image_file)\n', (15576, 15601), False, 'import os\n'), ((13530, 13549), 'numpy.floor', 'np.floor', (['(top + 0.5)'], {}), '(top + 0.5)\n', (13538, 13549), True, 'import numpy as np\n'), ((13605, 13625), 'numpy.floor', 'np.floor', (['(left + 0.5)'], {}), '(left + 0.5)\n', (13613, 13625), True, 'import numpy as np\n'), ((13695, 13717), 'numpy.floor', 'np.floor', (['(bottom + 0.5)'], {}), '(bottom + 0.5)\n', (13703, 13717), True, 'import numpy as np\n'), ((13786, 13807), 'numpy.floor', 'np.floor', (['(right + 0.5)'], {}), '(right + 0.5)\n', (13794, 13807), True, 'import numpy as np\n'), ((15312, 15349), 'os.path.join', 'os.path.join', (['output_path', 'image_file'], {}), 
'(output_path, image_file)\n', (15324, 15349), False, 'import os\n')]
|
import random
from collections import deque
import numpy as np
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import Adam
class DQNAgent:
    """Deep Q-Network agent with a separate target network and Double DQN updates."""

    def __init__(self, state_size,
                 action_size,
                 memory_size,
                 hidden_layers_number,
                 hidden_layers_size,
                 learning_rate=0.001,
                 gamma=0.95,
                 sample_batch_size=32,
                 exploration_rate=1.0,
                 exploration_min=0.01,
                 exploration_decay=0.995):
        assert hidden_layers_number > 0
        self.state_size = state_size
        self.action_size = action_size
        # Replay buffer; deque evicts the oldest transitions automatically.
        self.memory = deque(maxlen=memory_size)
        self.learning_rate = learning_rate
        self.gamma = gamma
        self.sample_batch_size = sample_batch_size
        self.exploration_rate = exploration_rate
        self.exploration_min = exploration_min
        self.exploration_decay = exploration_decay
        self.model = self._build_model(hidden_layers_number, hidden_layers_size)
        self.target_model = self._build_model(hidden_layers_number, hidden_layers_size)

    def _build_model(self, hidden_layers_number, hidden_layers_size):
        """Build the MLP mapping a state vector to one Q-value per action."""
        model = Sequential()
        model.add(Dense(hidden_layers_size, activation='relu', input_dim=self.state_size))
        for _ in range(hidden_layers_number - 1):
            model.add(Dense(hidden_layers_size, activation='relu'))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(optimizer=Adam(lr=self.learning_rate), loss='mse')
        return model

    def remember(self, state, action, reward, done, next_state):
        """Store one transition in the replay buffer."""
        self.memory.append((state, action, reward, done, next_state))

    def sync_weights(self):
        """Copy the online network's weights into the target network."""
        self.target_model.set_weights(self.model.get_weights())

    def train(self):
        """Run one Double DQN update on a randomly sampled minibatch.

        Double DQN: the *online* network selects the best next action and the
        *target* network evaluates it, reducing DQN's overestimation bias.
        """
        if len(self.memory) < self.sample_batch_size:
            return
        batch = random.sample(self.memory, self.sample_batch_size)
        states, actions, rewards, dones, next_states = unpack_batch(batch)
        # BUG FIX: Double DQN must select next actions with the ONLINE model.
        # The original used self.target_model here, which degrades the update
        # to plain DQN (argmax and evaluation from the same network).
        next_state_action_indexes = np.argmax(self.model.predict(next_states), axis=1)
        next_state_values_target_model = self.target_model.predict(next_states)
        # Evaluate the selected actions with the target network (vectorized
        # gather replaces the original Python loop).
        row_indexes = np.arange(len(states))
        next_state_values = next_state_values_target_model[row_indexes, next_state_action_indexes]
        # Setting values to 0 for episodes that are done. Only rewards should be taken into calculation in this case
        next_state_values = next_state_values * (1 - dones)
        targets = next_state_values * self.gamma + rewards
        # To calculate MSE based only on target (maximum) action values for each state, let's make MSE for the rest
        # action values to be equal 0. For this lets predict all action values for states and replace those that are
        # expected to be target(maximum) with values calculated by Bellman's equation
        expected_state_action_values = self.model.predict(states)
        expected_state_action_values[row_indexes, actions] = targets
        self.model.fit(states, expected_state_action_values, epochs=1, verbose=0, batch_size=1)
        if self.exploration_rate > self.exploration_min:
            self.exploration_rate *= self.exploration_decay

    def act(self, state, test_mode=False):
        """Epsilon-greedy action selection; always greedy when test_mode is True."""
        # Use short-circuit `and` instead of bitwise `&` on booleans.
        if (np.random.rand() <= self.exploration_rate) and not test_mode:
            return random.randrange(self.action_size)
        act_values = self.model.predict(np.array(state).reshape((1, self.state_size)))
        return np.argmax(act_values[0])
def unpack_batch(batch):
    """Split a batch of (state, action, reward, done, next_state) tuples
    into five numpy arrays.

    ``None`` next states are replaced with the current state; callers mask
    those entries out via the ``dones`` flags anyway.
    """
    states = []
    actions = []
    rewards = []
    dones = []
    next_states = []
    for current, action, reward, done, following in batch:
        current = np.array(current, copy=False)
        states.append(current)
        actions.append(action)
        rewards.append(reward)
        dones.append(done)
        if following is None:
            next_states.append(current)  # the result will be masked anyway
        else:
            next_states.append(np.array(following, copy=False))
    return (np.array(states, copy=False),
            np.array(actions),
            np.array(rewards, dtype=np.float32),
            np.array(dones, dtype=np.uint8),
            np.array(next_states, copy=False))
|
[
"numpy.argmax",
"random.sample",
"keras.optimizers.Adam",
"keras.layers.Dense",
"numpy.array",
"random.randrange",
"numpy.random.rand",
"keras.models.Sequential",
"collections.deque"
] |
[((721, 746), 'collections.deque', 'deque', ([], {'maxlen': 'memory_size'}), '(maxlen=memory_size)\n', (726, 746), False, 'from collections import deque\n'), ((1271, 1283), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1281, 1283), False, 'from keras.models import Sequential\n'), ((2019, 2069), 'random.sample', 'random.sample', (['self.memory', 'self.sample_batch_size'], {}), '(self.memory, self.sample_batch_size)\n', (2032, 2069), False, 'import random\n'), ((3739, 3763), 'numpy.argmax', 'np.argmax', (['act_values[0]'], {}), '(act_values[0])\n', (3748, 3763), True, 'import numpy as np\n'), ((3935, 3962), 'numpy.array', 'np.array', (['state'], {'copy': '(False)'}), '(state, copy=False)\n', (3943, 3962), True, 'import numpy as np\n'), ((4276, 4304), 'numpy.array', 'np.array', (['states'], {'copy': '(False)'}), '(states, copy=False)\n', (4284, 4304), True, 'import numpy as np\n'), ((4306, 4323), 'numpy.array', 'np.array', (['actions'], {}), '(actions)\n', (4314, 4323), True, 'import numpy as np\n'), ((4325, 4360), 'numpy.array', 'np.array', (['rewards'], {'dtype': 'np.float32'}), '(rewards, dtype=np.float32)\n', (4333, 4360), True, 'import numpy as np\n'), ((4375, 4406), 'numpy.array', 'np.array', (['dones'], {'dtype': 'np.uint8'}), '(dones, dtype=np.uint8)\n', (4383, 4406), True, 'import numpy as np\n'), ((4408, 4441), 'numpy.array', 'np.array', (['next_states'], {'copy': '(False)'}), '(next_states, copy=False)\n', (4416, 4441), True, 'import numpy as np\n'), ((1302, 1373), 'keras.layers.Dense', 'Dense', (['hidden_layers_size'], {'activation': '"""relu"""', 'input_dim': 'self.state_size'}), "(hidden_layers_size, activation='relu', input_dim=self.state_size)\n", (1307, 1373), False, 'from keras.layers import Dense\n'), ((1511, 1555), 'keras.layers.Dense', 'Dense', (['self.action_size'], {'activation': '"""linear"""'}), "(self.action_size, activation='linear')\n", (1516, 1555), False, 'from keras.layers import Dense\n'), ((3602, 3636), 
'random.randrange', 'random.randrange', (['self.action_size'], {}), '(self.action_size)\n', (3618, 3636), False, 'import random\n'), ((1447, 1491), 'keras.layers.Dense', 'Dense', (['hidden_layers_size'], {'activation': '"""relu"""'}), "(hidden_layers_size, activation='relu')\n", (1452, 1491), False, 'from keras.layers import Dense\n'), ((1589, 1616), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'self.learning_rate'}), '(lr=self.learning_rate)\n', (1593, 1616), False, 'from keras.optimizers import Adam\n'), ((3521, 3537), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3535, 3537), True, 'import numpy as np\n'), ((4231, 4263), 'numpy.array', 'np.array', (['next_state'], {'copy': '(False)'}), '(next_state, copy=False)\n', (4239, 4263), True, 'import numpy as np\n'), ((3677, 3692), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (3685, 3692), True, 'import numpy as np\n')]
|
import sys
import greentest
import gevent
from gevent.hub import get_hub
def raise_(ex):
    """Raise *ex* when called; used below as a callback/greenlet target."""
    raise ex
MSG = 'should be re-raised and caught'
class Test(greentest.TestCase):
    """Base case: exceptions raised inside the hub must be re-raised in the
    main greenlet. Subclasses define how the failing call is delivered."""

    error_fatal = False

    def test_sys_exit(self):
        self.start(sys.exit, MSG)
        try:
            gevent.sleep(0.001)
        except SystemExit as caught:
            assert str(caught) == MSG, repr(str(caught))
        else:
            raise AssertionError('must raise SystemExit')

    def test_keyboard_interrupt(self):
        self.start(raise_, KeyboardInterrupt)
        try:
            gevent.sleep(0.001)
        except KeyboardInterrupt:
            pass
        else:
            raise AssertionError('must raise KeyboardInterrupt')

    def test_system_error(self):
        self.start(raise_, SystemError(MSG))
        try:
            gevent.sleep(0.001)
        except SystemError as caught:
            assert str(caught) == MSG, repr(str(caught))
        else:
            raise AssertionError('must raise SystemError')

    def test_exception(self):
        # A plain Exception must not propagate; sleep simply returns.
        self.start(raise_, Exception('regular exception must not kill the program'))
        gevent.sleep(0.001)
class TestCallback(Test):
    """Deliver the failing call through a hub loop callback."""

    def tearDown(self):
        assert not self.pending_call.pending, self.pending_call

    def start(self, *args):
        self.pending_call = get_hub().loop.run_callback(*args)
class TestSpawn(Test):
    """Deliver the failing call through a spawned greenlet."""

    def tearDown(self):
        gevent.sleep(0.0001)
        assert self.greenlet.dead, self.greenlet

    def start(self, *args):
        self.greenlet = gevent.spawn(*args)
# Remove the abstract base class so the test collector only runs the
# concrete subclasses (TestCallback / TestSpawn), which implement start().
del Test
if __name__ == '__main__':
    greentest.main()
|
[
"greentest.main",
"gevent.hub.get_hub",
"gevent.spawn",
"gevent.sleep"
] |
[((1554, 1570), 'greentest.main', 'greentest.main', ([], {}), '()\n', (1568, 1570), False, 'import greentest\n'), ((1135, 1154), 'gevent.sleep', 'gevent.sleep', (['(0.001)'], {}), '(0.001)\n', (1147, 1154), False, 'import gevent\n'), ((1389, 1409), 'gevent.sleep', 'gevent.sleep', (['(0.0001)'], {}), '(0.0001)\n', (1401, 1409), False, 'import gevent\n'), ((1491, 1510), 'gevent.spawn', 'gevent.spawn', (['*args'], {}), '(*args)\n', (1503, 1510), False, 'import gevent\n'), ((294, 313), 'gevent.sleep', 'gevent.sleep', (['(0.001)'], {}), '(0.001)\n', (306, 313), False, 'import gevent\n'), ((580, 599), 'gevent.sleep', 'gevent.sleep', (['(0.001)'], {}), '(0.001)\n', (592, 599), False, 'import gevent\n'), ((835, 854), 'gevent.sleep', 'gevent.sleep', (['(0.001)'], {}), '(0.001)\n', (847, 854), False, 'import gevent\n'), ((1296, 1305), 'gevent.hub.get_hub', 'get_hub', ([], {}), '()\n', (1303, 1305), False, 'from gevent.hub import get_hub\n')]
|
"""
Test espei.utils classes and functions.
"""
import pickle
from tinydb import where
from espei.utils import ImmediateClient, PickleableTinyDB, MemoryStorage, \
flexible_open_string, add_bibtex_to_bib_database, bib_marker_map
from .fixtures import datasets_db, tmp_file
from .testing_data import CU_MG_TDB
# Fixture text for the flexible_open_string tests below; its exact contents
# are irrelevant -- it only needs to be a stable multiline string.
MULTILINE_HIPSTER_IPSUM = """Lorem ipsum dolor amet wayfarers kale chips chillwave
adaptogen schlitz lo-fi jianbing ennui occupy pabst health goth chicharrones.
Glossier enamel pin pitchfork PBR&B ennui. Actually small batch marfa edison
bulb poutine, chicharrones neutra swag farm-to-table lyft meggings mixtape
pork belly. DIY iceland schlitz YOLO, four loko pok pok single-origin coffee
normcore. Shabby chic helvetica mustache taxidermy tattooed kombucha cliche
gastropub gentrify ramps hexagon waistcoat authentic snackwave."""
def test_immediate_client_returns_map_results_directly():
    """ImmediateClient.map should return computed values, not Futures."""
    from distributed import LocalCluster
    client = ImmediateClient(LocalCluster(n_workers=1))

    def square(value):
        return value ** 2

    numbers = range(0, 11)
    mapped = client.map(square, numbers)
    assert mapped == [square(value) for value in numbers]
def test_pickelable_tinydb_can_be_pickled_and_unpickled():
    """A PickleableTinyDB survives a pickle round trip with its data intact."""
    record = {'test_key': ['test', 'values']}
    db = PickleableTinyDB(storage=MemoryStorage)
    db.insert(record)
    roundtripped = pickle.loads(pickle.dumps(db))
    assert roundtripped.search(where('test_key').exists())[0] == record
def test_flexible_open_string_raw_string():
    """Raw multiline strings are passed through unchanged by flexible_open_string."""
    result = flexible_open_string(MULTILINE_HIPSTER_IPSUM)
    assert result == MULTILINE_HIPSTER_IPSUM
def test_flexible_open_string_file_like(tmp_file):
    """File-like objects (anything with read()) have their contents returned."""
    path = tmp_file(MULTILINE_HIPSTER_IPSUM)
    with open(path) as handle:
        result = flexible_open_string(handle)
    assert result == MULTILINE_HIPSTER_IPSUM
def test_flexible_open_string_path_like(tmp_file):
    """Path-like strings are opened, read, and their contents returned."""
    path = tmp_file(MULTILINE_HIPSTER_IPSUM)
    result = flexible_open_string(path)
    assert result == MULTILINE_HIPSTER_IPSUM
def test_adding_bibtex_entries_to_bibliography_db(datasets_db):
    """Adding BibTeX entries to a database works and the database can be searched."""
    # Two entries: an @article and a @phdthesis (author names anonymized).
    TEST_BIBTEX = """@article{Roe1952gamma,
author = {<NAME>. and <NAME>.},
journal = {Trans. Am. Soc. Met.},
keywords = {Fe-Cr,Fe-Ti,Fe-Ti-Cr},
pages = {1030--1041},
title = {{Gamma Loop Studies in the Fe-Ti, Fe-Cr, and Fe-Ti-Cr Systems}},
volume = {44},
year = {1952}
}
@phdthesis{shin2007thesis,
author = {<NAME>},
keywords = {Al-Cu,Al-Cu-Mg,Al-Cu-Si,Al-Mg,Al-Mg-Si,Al-Si,Cu-Mg,Mg-Si,SQS},
number = {May},
school = {The Pennsylvania State University},
title = {{Thermodynamic properties of solid solutions from special quasirandom structures and CALPHAD modeling: Application to aluminum-copper-magnesium-silicon and hafnium-silicon-oxygen}},
year = {2007}
}"""
    # Insert both entries, then look one up by its BibTeX key.
    db = add_bibtex_to_bib_database(TEST_BIBTEX, datasets_db)
    search_res = db.search(where('ID') == 'Roe1952gamma')
    assert len(search_res) == 1
    assert len(db.all()) == 2
def test_bib_marker_map():
    """bib_marker_map should return a proper dict"""
    expected = {
        'bocklund2018': {
            'formatted': 'bocklund2018',
            'markers': {'fillstyle': 'none', 'marker': 'o'},
        },
        'otis2016': {
            'formatted': 'otis2016',
            'markers': {'fillstyle': 'none', 'marker': 'v'},
        },
    }
    assert bib_marker_map(['otis2016', 'bocklund2018']) == expected
|
[
"espei.utils.add_bibtex_to_bib_database",
"espei.utils.PickleableTinyDB",
"espei.utils.bib_marker_map",
"distributed.LocalCluster",
"tinydb.where",
"espei.utils.flexible_open_string",
"pickle.dumps"
] |
[((1467, 1506), 'espei.utils.PickleableTinyDB', 'PickleableTinyDB', ([], {'storage': 'MemoryStorage'}), '(storage=MemoryStorage)\n', (1483, 1506), False, 'from espei.utils import ImmediateClient, PickleableTinyDB, MemoryStorage, flexible_open_string, add_bibtex_to_bib_database, bib_marker_map\n'), ((1790, 1835), 'espei.utils.flexible_open_string', 'flexible_open_string', (['MULTILINE_HIPSTER_IPSUM'], {}), '(MULTILINE_HIPSTER_IPSUM)\n', (1810, 1835), False, 'from espei.utils import ImmediateClient, PickleableTinyDB, MemoryStorage, flexible_open_string, add_bibtex_to_bib_database, bib_marker_map\n'), ((2416, 2443), 'espei.utils.flexible_open_string', 'flexible_open_string', (['fname'], {}), '(fname)\n', (2436, 2443), False, 'from espei.utils import ImmediateClient, PickleableTinyDB, MemoryStorage, flexible_open_string, add_bibtex_to_bib_database, bib_marker_map\n'), ((3327, 3379), 'espei.utils.add_bibtex_to_bib_database', 'add_bibtex_to_bib_database', (['TEST_BIBTEX', 'datasets_db'], {}), '(TEST_BIBTEX, datasets_db)\n', (3353, 3379), False, 'from espei.utils import ImmediateClient, PickleableTinyDB, MemoryStorage, flexible_open_string, add_bibtex_to_bib_database, bib_marker_map\n'), ((3599, 3643), 'espei.utils.bib_marker_map', 'bib_marker_map', (["['otis2016', 'bocklund2018']"], {}), "(['otis2016', 'bocklund2018'])\n", (3613, 3643), False, 'from espei.utils import ImmediateClient, PickleableTinyDB, MemoryStorage, flexible_open_string, add_bibtex_to_bib_database, bib_marker_map\n'), ((1058, 1083), 'distributed.LocalCluster', 'LocalCluster', ([], {'n_workers': '(1)'}), '(n_workers=1)\n', (1070, 1083), False, 'from distributed import LocalCluster\n'), ((1554, 1570), 'pickle.dumps', 'pickle.dumps', (['db'], {}), '(db)\n', (1566, 1570), False, 'import pickle\n'), ((2152, 2176), 'espei.utils.flexible_open_string', 'flexible_open_string', (['fp'], {}), '(fp)\n', (2172, 2176), False, 'from espei.utils import ImmediateClient, PickleableTinyDB, MemoryStorage, 
flexible_open_string, add_bibtex_to_bib_database, bib_marker_map\n'), ((3407, 3418), 'tinydb.where', 'where', (['"""ID"""'], {}), "('ID')\n", (3412, 3418), False, 'from tinydb import where\n'), ((1593, 1610), 'tinydb.where', 'where', (['"""test_key"""'], {}), "('test_key')\n", (1598, 1610), False, 'from tinydb import where\n')]
|
import mnist_loader
import network
# Train a fully connected network on MNIST with mini-batch SGD.
# (Removed commented-out debug prints of the training-data shapes.)
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
# 784 input pixels -> 30 hidden units -> 10 output classes.
net = network.Network([784, 30, 10])
# 30 epochs, mini-batch size 10, learning rate 3.0; evaluate on test_data.
net.SGD(training_data, 30, 10, 3.0, test_data=test_data)
|
[
"network.Network",
"mnist_loader.load_data_wrapper"
] |
[((80, 112), 'mnist_loader.load_data_wrapper', 'mnist_loader.load_data_wrapper', ([], {}), '()\n', (110, 112), False, 'import mnist_loader\n'), ((272, 302), 'network.Network', 'network.Network', (['[784, 30, 10]'], {}), '([784, 30, 10])\n', (287, 302), False, 'import network\n')]
|
############################
# Libraries used as plugins
############################
# try:
# import katana as _
# has_katana = True
# except ImportError:
# has_katana = False
import metagraph
# Use this as the entry_point object: a PluginRegistry that find_plugins()
# below populates from the metagraph_katana module.
registry = metagraph.PluginRegistry("metagraph_katana")
def find_plugins():
    """Import the plugin module, register its contents, and return the plugins."""
    # The import guarantees every item we want registered is actually loaded.
    from . import metagraph_katana
    registry.register_from_modules(metagraph_katana)
    return registry.plugins
|
[
"metagraph.PluginRegistry"
] |
[((259, 303), 'metagraph.PluginRegistry', 'metagraph.PluginRegistry', (['"""metagraph_katana"""'], {}), "('metagraph_katana')\n", (283, 303), False, 'import metagraph\n')]
|
import sys
sys.path.insert(0, '../')
import warnings
warnings.simplefilter('ignore')
from get_config import get_config
from utils import fix_seed, rle2mask, mask2rle
from models import build_model
from utils_inference import get_pred_mask, get_rle
from get_fold_idxs_list import get_fold_idxs_list
import numpy as np
import pandas as pd
import os
from os.path import join as opj
import gc
import cv2
import rasterio
from rasterio.windows import Window
import torch
if __name__ == '__main__':
    # config: fix RNG seeds for reproducibility and read all paths/settings.
    fix_seed(2021)
    config = get_config()
    INPUT_PATH = config['INPUT_PATH']
    OUTPUT_PATH = config['OUTPUT_PATH']
    os.makedirs(OUTPUT_PATH, exist_ok=True)
    device = config['device']
    print(device)
    # import data: competition metadata tables.
    train_df = pd.read_csv(opj(INPUT_PATH, 'train.csv'))
    info_df = pd.read_csv(opj(INPUT_PATH, 'HuBMAP-20-dataset_information.csv'))
    sub_df = pd.read_csv(opj(INPUT_PATH, 'sample_submission.csv'))
    print('train_df.shape = ', train_df.shape)
    print('info_df.shape = ', info_df.shape)
    print('sub_df.shape = ', sub_df.shape)
    # inference: collect the best-score checkpoint path per (split seed, fold).
    LOAD_LOCAL_WEIGHT_PATH_LIST = {}
    for seed in config['split_seed_list']:
        LOAD_LOCAL_WEIGHT_PATH_LIST[seed] = []
        for fold in config['FOLD_LIST']:
            LOAD_LOCAL_WEIGHT_PATH_LIST[seed].append(
                opj(config['model_path'], f'model_seed{seed}_fold{fold}_bestscore.pth'))
    # Build one eval-mode model per checkpoint, grouped by split seed.
    model_list = {}
    for seed in config['split_seed_list']:
        model_list[seed] = []
        for path in LOAD_LOCAL_WEIGHT_PATH_LIST[seed]:
            print("Loading weights from %s" % path)
            model = build_model(model_name=config['model_name'],
                                resolution=(None, None),
                                deepsupervision=config['deepsupervision'],
                                clfhead=config['clfhead'],
                                clf_threshold=config['clf_threshold'],
                                load_weights=False).to(device)
            model.load_state_dict(torch.load(path))
            model.eval()
            model_list[seed].append(model)
    # pseudo-label for test data: per-fold validation/test patient splits.
    val_patient_numbers_list = [
        [68250], # fold0
        [65631], # fold1
        [67177], # fold2
    ]
    test_patient_numbers_list = [
        [63921], # fold0
        [63921], # fold1
        [63921], # fold2
    ]
    _, _, tst_idxs_list = get_fold_idxs_list(info_df, val_patient_numbers_list, test_patient_numbers_list)
    print("test index list: {}".format(tst_idxs_list))
    # Predict a mask for each held-out index and store it as an RLE string.
    train_df['predicted'] = None
    for idx in tst_idxs_list:
        print('idx = ', idx)
        pred_mask, h, w = get_pred_mask(idx, train_df, info_df, model_list, mode='train')
        rle = get_rle(pred_mask, h, w)
        train_df.loc[idx, 'predicted'] = rle
    train_df.to_csv(opj(OUTPUT_PATH, 'test_mask_predicted.csv'), index=False)
    # pseudo-label for test data
    # for idx in range(len(sub_df)):
    #     print('idx = ', idx)
    #     pred_mask,h,w = get_pred_mask(idx, sub_df, model_list, mode='test')
    #     rle = get_rle(pred_mask,h,w)
    #     sub_df.loc[idx,'predicted'] = rle
    # sub_df.to_csv(opj(OUTPUT_PATH, 'pseudo_test.csv'), index=False)
|
[
"os.makedirs",
"warnings.simplefilter",
"torch.load",
"sys.path.insert",
"utils_inference.get_rle",
"models.build_model",
"get_fold_idxs_list.get_fold_idxs_list",
"utils_inference.get_pred_mask",
"os.path.join",
"get_config.get_config",
"utils.fix_seed"
] |
[((12, 37), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../"""'], {}), "(0, '../')\n", (27, 37), False, 'import sys\n'), ((55, 86), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (76, 86), False, 'import warnings\n'), ((514, 528), 'utils.fix_seed', 'fix_seed', (['(2021)'], {}), '(2021)\n', (522, 528), False, 'from utils import fix_seed, rle2mask, mask2rle\n'), ((542, 554), 'get_config.get_config', 'get_config', ([], {}), '()\n', (552, 554), False, 'from get_config import get_config\n'), ((637, 676), 'os.makedirs', 'os.makedirs', (['OUTPUT_PATH'], {'exist_ok': '(True)'}), '(OUTPUT_PATH, exist_ok=True)\n', (648, 676), False, 'import os\n'), ((2426, 2511), 'get_fold_idxs_list.get_fold_idxs_list', 'get_fold_idxs_list', (['info_df', 'val_patient_numbers_list', 'test_patient_numbers_list'], {}), '(info_df, val_patient_numbers_list, test_patient_numbers_list\n )\n', (2444, 2511), False, 'from get_fold_idxs_list import get_fold_idxs_list\n'), ((771, 799), 'os.path.join', 'opj', (['INPUT_PATH', '"""train.csv"""'], {}), "(INPUT_PATH, 'train.csv')\n", (774, 799), True, 'from os.path import join as opj\n'), ((827, 879), 'os.path.join', 'opj', (['INPUT_PATH', '"""HuBMAP-20-dataset_information.csv"""'], {}), "(INPUT_PATH, 'HuBMAP-20-dataset_information.csv')\n", (830, 879), True, 'from os.path import join as opj\n'), ((906, 946), 'os.path.join', 'opj', (['INPUT_PATH', '"""sample_submission.csv"""'], {}), "(INPUT_PATH, 'sample_submission.csv')\n", (909, 946), True, 'from os.path import join as opj\n'), ((2681, 2744), 'utils_inference.get_pred_mask', 'get_pred_mask', (['idx', 'train_df', 'info_df', 'model_list'], {'mode': '"""train"""'}), "(idx, train_df, info_df, model_list, mode='train')\n", (2694, 2744), False, 'from utils_inference import get_pred_mask, get_rle\n'), ((2759, 2783), 'utils_inference.get_rle', 'get_rle', (['pred_mask', 'h', 'w'], {}), '(pred_mask, h, w)\n', (2766, 2783), False, 'from utils_inference import 
get_pred_mask, get_rle\n'), ((2849, 2892), 'os.path.join', 'opj', (['OUTPUT_PATH', '"""test_mask_predicted.csv"""'], {}), "(OUTPUT_PATH, 'test_mask_predicted.csv')\n", (2852, 2892), True, 'from os.path import join as opj\n'), ((1339, 1410), 'os.path.join', 'opj', (["config['model_path']", 'f"""model_seed{seed}_fold{fold}_bestscore.pth"""'], {}), "(config['model_path'], f'model_seed{seed}_fold{fold}_bestscore.pth')\n", (1342, 1410), True, 'from os.path import join as opj\n'), ((2036, 2052), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (2046, 2052), False, 'import torch\n'), ((1632, 1838), 'models.build_model', 'build_model', ([], {'model_name': "config['model_name']", 'resolution': '(None, None)', 'deepsupervision': "config['deepsupervision']", 'clfhead': "config['clfhead']", 'clf_threshold': "config['clf_threshold']", 'load_weights': '(False)'}), "(model_name=config['model_name'], resolution=(None, None),\n deepsupervision=config['deepsupervision'], clfhead=config['clfhead'],\n clf_threshold=config['clf_threshold'], load_weights=False)\n", (1643, 1838), False, 'from models import build_model\n')]
|
# coding=utf-8
# Copyright 2021 <NAME> and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
""" Tokenization classes for BARTpho-syllable model."""
import os
from collections import defaultdict
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
from ...tokenization_utils import AddedToken
from ...tokenization_utils_base import EncodingFast
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_bartpho import BartphoTokenizer
else:
BartphoTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "sentencepiece.bpe.model",
"monolingual_vocab_file": "dict.txt",
"tokenizer_file": "tokenizer.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
"tokenizer_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" BARTpho tokenizer (backed by HuggingFace's *tokenizers* library). Adapted from
[`XLMRobertaTokenizerFast`]. Based on [SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
Additional special tokens used by the tokenizer.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = BartphoTokenizer
def __init__(
self,
vocab_file=None,
monolingual_vocab_file=None,
tokenizer_file=None,
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
**kwargs
):
# Mask token behave like a normal word, i.e. include the space before it
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
super().__init__(
vocab_file,
monolingual_vocab_file,
tokenizer_file=tokenizer_file,
bos_token=bos_token,
eos_token=eos_token,
sep_token=sep_token,
cls_token=cls_token,
unk_token=unk_token,
pad_token=pad_token,
mask_token=mask_token,
**kwargs,
)
self.vocab_file = vocab_file
self.monolingual_vocab_file = monolingual_vocab_file
self.can_save_slow_tokenizer = False if not self.vocab_file else True
def get_added_vocab_hacking(self):
"""
Returns the added tokens in the vocabulary as a dictionary of token to index.
Returns:
`Dict[str, int], Dict[int, int]`: The added tokens, and their original and new ids
"""
base_vocab_size = self._tokenizer.get_vocab_size(with_added_tokens=False)
full_vocab_size = self._tokenizer.get_vocab_size(with_added_tokens=True)
if full_vocab_size == base_vocab_size:
return {}, {}
# Tokens in added_vocab should have ids that are equal to or larger than the size of base_vocab
added_vocab = dict(
(self._tokenizer.id_to_token(index), index + 1 - base_vocab_size + self.mask_token_id)
for index in range(base_vocab_size, full_vocab_size)
)
id_mapping = dict((index, self._tokenizer.token_to_id(tok)) for tok, index in added_vocab.items())
return added_vocab, id_mapping
def _decode(
self,
token_ids: Union[int, List[int]],
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: bool = True,
**kwargs
) -> str:
self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
if isinstance(token_ids, int):
token_ids = [token_ids]
# Mapping ids into their original values
_, id_mapping = self.get_added_vocab_hacking()
if len(id_mapping) > 0:
token_ids = [id_mapping[id] if id in id_mapping else id for id in token_ids]
text = self._tokenizer.decode(token_ids, skip_special_tokens=skip_special_tokens)
if clean_up_tokenization_spaces:
clean_text = self.clean_up_tokenization(text)
return clean_text
else:
return text
def _convert_encoding(
self,
encoding: EncodingFast,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
) -> Tuple[Dict[str, Any], List[EncodingFast]]:
"""
Convert the encoding representation (from low-level HuggingFace tokenizer output) to a python Dict and a list
of encodings, take care of building a batch from overflowing tokens.
Overflowing tokens are converted to additional examples (like batches) so the output values of the dict are
lists (overflows) of lists (tokens).
Output shape: (overflows, sequence length)
"""
if return_token_type_ids is None:
return_token_type_ids = "token_type_ids" in self.model_input_names
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
if return_overflowing_tokens and encoding.overflowing is not None:
encodings = [encoding] + encoding.overflowing
else:
encodings = [encoding]
encoding_dict = defaultdict(list)
added_vocab, _ = self.get_added_vocab_hacking()
for e in encodings:
# encoding_dict["input_ids"].append(e.ids)
# Reassign ids of tokens due to the hacking strategy
ids = []
for id, token in zip(e.ids, e.tokens):
if id <= self.mask_token_id:
ids.append(id)
else:
if token.strip() in added_vocab:
ids.append(added_vocab[token.strip()])
else:
ids.append(self.unk_token_id)
encoding_dict["input_ids"].append(ids)
if return_token_type_ids:
encoding_dict["token_type_ids"].append(e.type_ids)
if return_attention_mask:
encoding_dict["attention_mask"].append(e.attention_mask)
if return_special_tokens_mask:
encoding_dict["special_tokens_mask"].append(e.special_tokens_mask)
if return_offsets_mapping:
encoding_dict["offset_mapping"].append(e.offsets)
if return_length:
# encoding_dict["length"].append(len(e.ids))
encoding_dict["length"].append(len(ids))
return encoding_dict, encodings
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A BARTpho sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s></s> B </s>`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. BARTpho does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a "
"slow tokenizer."
)
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
return
out_vocab_file = os.path.join(
save_directory,
(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"],
)
out_monolingual_vocab_file = os.path.join(
save_directory,
(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(out_monolingual_vocab_file):
copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
return (out_vocab_file, out_monolingual_vocab_file)
|
[
"os.path.abspath",
"os.path.isdir",
"collections.defaultdict",
"shutil.copyfile",
"os.path.join"
] |
[((9077, 9094), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (9088, 9094), False, 'from collections import defaultdict\n'), ((12786, 12904), 'os.path.join', 'os.path.join', (['save_directory', "((filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES[\n 'vocab_file'])"], {}), "(save_directory, (filename_prefix + '-' if filename_prefix else\n '') + VOCAB_FILES_NAMES['vocab_file'])\n", (12798, 12904), False, 'import os\n'), ((12974, 13104), 'os.path.join', 'os.path.join', (['save_directory', "((filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES[\n 'monolingual_vocab_file'])"], {}), "(save_directory, (filename_prefix + '-' if filename_prefix else\n '') + VOCAB_FILES_NAMES['monolingual_vocab_file'])\n", (12986, 13104), False, 'import os\n'), ((12623, 12652), 'os.path.isdir', 'os.path.isdir', (['save_directory'], {}), '(save_directory)\n', (12636, 12652), False, 'import os\n'), ((13148, 13180), 'os.path.abspath', 'os.path.abspath', (['self.vocab_file'], {}), '(self.vocab_file)\n', (13163, 13180), False, 'import os\n'), ((13184, 13215), 'os.path.abspath', 'os.path.abspath', (['out_vocab_file'], {}), '(out_vocab_file)\n', (13199, 13215), False, 'import os\n'), ((13229, 13270), 'shutil.copyfile', 'copyfile', (['self.vocab_file', 'out_vocab_file'], {}), '(self.vocab_file, out_vocab_file)\n', (13237, 13270), False, 'from shutil import copyfile\n'), ((13283, 13327), 'os.path.abspath', 'os.path.abspath', (['self.monolingual_vocab_file'], {}), '(self.monolingual_vocab_file)\n', (13298, 13327), False, 'import os\n'), ((13331, 13374), 'os.path.abspath', 'os.path.abspath', (['out_monolingual_vocab_file'], {}), '(out_monolingual_vocab_file)\n', (13346, 13374), False, 'import os\n'), ((13388, 13453), 'shutil.copyfile', 'copyfile', (['self.monolingual_vocab_file', 'out_monolingual_vocab_file'], {}), '(self.monolingual_vocab_file, out_monolingual_vocab_file)\n', (13396, 13453), False, 'from shutil import copyfile\n')]
|
#!/usr/bin/env python
from ansible.module_utils.basic import *
import requests
import os
from dnsimple import DNSimple
'''
parameters:
api_token DNSimple token for account
account_id DNSimple account id
domain Zone from DNSimple
record JSON object, includes type, name, content, ttl
example record:
record={
'type':'A',
'name':'control',
'content':'192.168.10.1',
'ttl':'60'
}
state = 'present' or 'absent'
'''
def create_record(data):
nonexistant=False
record = {
'type': data['type'],
'name': data['name'],
'content': data['content'],
'ttl': data['ttl']
}
dns=DNSimple(api_token=data['dnsimple_token'],account_id=data['dnsimple_account'])
if 'present' in data['state']:
for n in dns.records(data['domain']):
if record['name'] == n['record']['name']:
res=dns.update_record(data['domain'],n['record']['id'],record)
nonexistant=False
return (True, res['record']['id'], 'record updated')
else:
nonexistant=True
if nonexistant:
res=dns.add_record(data['domain'], record)
return (True, res['record']['id'], 'record added')
return (False, "{}", 'no record added')
def delete_record(data):
dns=DNSimple(api_token=data['dnsimple_token'],account_id=data['dnsimple_account'])
if 'absent' in data['state']:
for n in dns.records(data['domain']):
if data['name'] == n['record']['name']:
dns.delete_record(data['domain'],n['record']['id'])
return (True, None, 'record deleted')
return (False, None, 'no record deleted')
def main():
fields = {
"type": {"required": False, "default": "A", "type": "str"},
"name": {"required": True, "type": "str"},
"content": {"required": True, "type": "str"},
"ttl": {"required": False, "default": "600", "type": "str"},
"domain": {"required": True, "type": "str"},
"dnsimple_token": {"required": True, "type": "str"},
"dnsimple_account": {"required": True, "type": "str"},
"state": {"default": "present","choices": ['present', 'absent'],"type": 'str'}
}
choice_map = {
"present": create_record,
"absent": delete_record
}
module = AnsibleModule(argument_spec=fields)
has_changed, record_id, result = choice_map.get(module.params['state'])(module.params)
module.exit_json(changed=has_changed, record_id=record_id, meta=result)
if __name__ == '__main__':
main()
|
[
"dnsimple.DNSimple"
] |
[((664, 743), 'dnsimple.DNSimple', 'DNSimple', ([], {'api_token': "data['dnsimple_token']", 'account_id': "data['dnsimple_account']"}), "(api_token=data['dnsimple_token'], account_id=data['dnsimple_account'])\n", (672, 743), False, 'from dnsimple import DNSimple\n'), ((1332, 1411), 'dnsimple.DNSimple', 'DNSimple', ([], {'api_token': "data['dnsimple_token']", 'account_id': "data['dnsimple_account']"}), "(api_token=data['dnsimple_token'], account_id=data['dnsimple_account'])\n", (1340, 1411), False, 'from dnsimple import DNSimple\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=module-missing-docstring,class-missing-docstring
from __future__ import print_function
import json
import os
import cros_config_test_schema
import libcros_schema
from chromite.lib import cros_test_lib
from chromite.lib import osutils
BASIC_CONFIG = """
mosys-base: &mosys_base_cmds
name: 'mosys'
args:
- 'platform id'
- 'platform name'
nautilus-mosys-base: &nautilus_mosys_cmds
name: 'mosys'
args:
- 'platform version'
cros-config-base: &cros_config_base_cmds
name: 'cros-config'
args:
- '/ brand-name'
cros-config-lte: &cros_config_lte_cmds
name: 'cros-config'
args:
- '/arc/build-properties device'
chromeos:
devices:
- device-name: 'nautilus'
command-groups:
- *mosys_base_cmds
- *nautilus_mosys_cmds
- *cros_config_base_cmds
- device-name: 'nautiluslte'
command-groups:
- *mosys_base_cmds
- *nautilus_mosys_cmds
- *cros_config_base_cmds
- *cros_config_lte_cmds
"""
this_dir = os.path.dirname(__file__)
class ParseArgsTests(cros_test_lib.TestCase):
def testParseArgs(self):
argv = ['-s', 'schema', '-c', 'config', '-f', 'nautilus', '-o', 'output']
opts = cros_config_test_schema.ParseArgs(argv)
self.assertEqual(opts.schema, 'schema')
self.assertEqual(opts.config, 'config')
self.assertEqual(opts.filter, 'nautilus')
self.assertEqual(opts.output, 'output')
class TransformConfigTests(cros_test_lib.TestCase):
def testBasicTransform(self):
result = cros_config_test_schema.TransformConfig(BASIC_CONFIG)
json_dict = json.loads(result)
self.assertEqual(1, len(json_dict))
json_obj = libcros_schema.GetNamedTuple(json_dict)
self.assertEqual(2, len(json_obj.chromeos.devices))
device = json_obj.chromeos.devices[0]
self.assertEqual('nautilus', device.device_name)
self.assertEqual(3, len(device.command_groups))
device = json_obj.chromeos.devices[1]
self.assertEqual('nautiluslte', device.device_name)
self.assertEqual(4, len(device.command_groups))
def testTransformConfig_NoMatch(self):
result = cros_config_test_schema.TransformConfig(
BASIC_CONFIG, device_filter='abc123')
json_dict = json.loads(result)
json_obj = libcros_schema.GetNamedTuple(json_dict)
self.assertEqual(0, len(json_obj.chromeos.devices))
def testTransformConfig_FilterMatch(self):
result = cros_config_test_schema.TransformConfig(
BASIC_CONFIG, device_filter='nautilus')
json_dict = json.loads(result)
json_obj = libcros_schema.GetNamedTuple(json_dict)
self.assertEqual(1, len(json_obj.chromeos.devices))
device = json_obj.chromeos.devices[0]
self.assertEqual('nautilus', device.device_name)
self.assertEqual(3, len(device.command_groups))
class MainTests(cros_test_lib.TempDirTestCase):
def testMainImportNoFilter(self):
output = os.path.join(self.tempdir, 'output.json')
cros_config_test_schema.Start(
os.path.join(this_dir, 'test_data/cros_config_test_device.yaml'),
None,
output,
None)
json_dict = json.loads(osutils.ReadFile(output))
json_obj = libcros_schema.GetNamedTuple(json_dict)
self.assertEqual(2, len(json_obj.chromeos.devices))
device = json_obj.chromeos.devices[0]
self.assertEqual('nautilus', device.device_name)
self.assertEqual(3, len(device.command_groups))
device = json_obj.chromeos.devices[1]
self.assertEqual('nautiluslte', device.device_name)
self.assertEqual(4, len(device.command_groups))
def testMainImportFilterNautilus(self):
output = os.path.join(self.tempdir, 'output.json')
cros_config_test_schema.Start(
os.path.join(this_dir, 'test_data/cros_config_test_device.yaml'),
'nautilus',
output,
None)
json_dict = json.loads(osutils.ReadFile(output))
json_obj = libcros_schema.GetNamedTuple(json_dict)
self.assertEqual(1, len(json_obj.chromeos.devices))
device = json_obj.chromeos.devices[0]
self.assertEqual('nautilus', device.device_name)
self.assertEqual(3, len(device.command_groups))
def testMainImportFilterNautilusLte(self):
output = os.path.join(self.tempdir, 'output.json')
cros_config_test_schema.Start(
os.path.join(this_dir, 'test_data/cros_config_test_device.yaml'),
'nautiluslte',
output,
None)
json_dict = json.loads(osutils.ReadFile(output))
json_obj = libcros_schema.GetNamedTuple(json_dict)
self.assertEqual(1, len(json_obj.chromeos.devices))
device = json_obj.chromeos.devices[0]
self.assertEqual('nautiluslte', device.device_name)
self.assertEqual(4, len(device.command_groups))
if __name__ == '__main__':
cros_test_lib.main(module=__name__)
|
[
"json.loads",
"os.path.dirname",
"chromite.lib.osutils.ReadFile",
"cros_config_test_schema.TransformConfig",
"libcros_schema.GetNamedTuple",
"chromite.lib.cros_test_lib.main",
"cros_config_test_schema.ParseArgs",
"os.path.join"
] |
[((1258, 1283), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1273, 1283), False, 'import os\n'), ((4972, 5007), 'chromite.lib.cros_test_lib.main', 'cros_test_lib.main', ([], {'module': '__name__'}), '(module=__name__)\n', (4990, 5007), False, 'from chromite.lib import cros_test_lib\n'), ((1449, 1488), 'cros_config_test_schema.ParseArgs', 'cros_config_test_schema.ParseArgs', (['argv'], {}), '(argv)\n', (1482, 1488), False, 'import cros_config_test_schema\n'), ((1767, 1820), 'cros_config_test_schema.TransformConfig', 'cros_config_test_schema.TransformConfig', (['BASIC_CONFIG'], {}), '(BASIC_CONFIG)\n', (1806, 1820), False, 'import cros_config_test_schema\n'), ((1837, 1855), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (1847, 1855), False, 'import json\n'), ((1912, 1951), 'libcros_schema.GetNamedTuple', 'libcros_schema.GetNamedTuple', (['json_dict'], {}), '(json_dict)\n', (1940, 1951), False, 'import libcros_schema\n'), ((2362, 2439), 'cros_config_test_schema.TransformConfig', 'cros_config_test_schema.TransformConfig', (['BASIC_CONFIG'], {'device_filter': '"""abc123"""'}), "(BASIC_CONFIG, device_filter='abc123')\n", (2401, 2439), False, 'import cros_config_test_schema\n'), ((2465, 2483), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (2475, 2483), False, 'import json\n'), ((2499, 2538), 'libcros_schema.GetNamedTuple', 'libcros_schema.GetNamedTuple', (['json_dict'], {}), '(json_dict)\n', (2527, 2538), False, 'import libcros_schema\n'), ((2654, 2733), 'cros_config_test_schema.TransformConfig', 'cros_config_test_schema.TransformConfig', (['BASIC_CONFIG'], {'device_filter': '"""nautilus"""'}), "(BASIC_CONFIG, device_filter='nautilus')\n", (2693, 2733), False, 'import cros_config_test_schema\n'), ((2759, 2777), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (2769, 2777), False, 'import json\n'), ((2793, 2832), 'libcros_schema.GetNamedTuple', 'libcros_schema.GetNamedTuple', (['json_dict'], {}), 
'(json_dict)\n', (2821, 2832), False, 'import libcros_schema\n'), ((3136, 3177), 'os.path.join', 'os.path.join', (['self.tempdir', '"""output.json"""'], {}), "(self.tempdir, 'output.json')\n", (3148, 3177), False, 'import os\n'), ((3399, 3438), 'libcros_schema.GetNamedTuple', 'libcros_schema.GetNamedTuple', (['json_dict'], {}), '(json_dict)\n', (3427, 3438), False, 'import libcros_schema\n'), ((3850, 3891), 'os.path.join', 'os.path.join', (['self.tempdir', '"""output.json"""'], {}), "(self.tempdir, 'output.json')\n", (3862, 3891), False, 'import os\n'), ((4119, 4158), 'libcros_schema.GetNamedTuple', 'libcros_schema.GetNamedTuple', (['json_dict'], {}), '(json_dict)\n', (4147, 4158), False, 'import libcros_schema\n'), ((4422, 4463), 'os.path.join', 'os.path.join', (['self.tempdir', '"""output.json"""'], {}), "(self.tempdir, 'output.json')\n", (4434, 4463), False, 'import os\n'), ((4694, 4733), 'libcros_schema.GetNamedTuple', 'libcros_schema.GetNamedTuple', (['json_dict'], {}), '(json_dict)\n', (4722, 4733), False, 'import libcros_schema\n'), ((3221, 3285), 'os.path.join', 'os.path.join', (['this_dir', '"""test_data/cros_config_test_device.yaml"""'], {}), "(this_dir, 'test_data/cros_config_test_device.yaml')\n", (3233, 3285), False, 'import os\n'), ((3358, 3382), 'chromite.lib.osutils.ReadFile', 'osutils.ReadFile', (['output'], {}), '(output)\n', (3374, 3382), False, 'from chromite.lib import osutils\n'), ((3935, 3999), 'os.path.join', 'os.path.join', (['this_dir', '"""test_data/cros_config_test_device.yaml"""'], {}), "(this_dir, 'test_data/cros_config_test_device.yaml')\n", (3947, 3999), False, 'import os\n'), ((4078, 4102), 'chromite.lib.osutils.ReadFile', 'osutils.ReadFile', (['output'], {}), '(output)\n', (4094, 4102), False, 'from chromite.lib import osutils\n'), ((4507, 4571), 'os.path.join', 'os.path.join', (['this_dir', '"""test_data/cros_config_test_device.yaml"""'], {}), "(this_dir, 'test_data/cros_config_test_device.yaml')\n", (4519, 4571), False, 'import 
os\n'), ((4653, 4677), 'chromite.lib.osutils.ReadFile', 'osutils.ReadFile', (['output'], {}), '(output)\n', (4669, 4677), False, 'from chromite.lib import osutils\n')]
|
"""
Copyright (c) 2017 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals
from dockerfile_parse import DockerfileParser
from atomic_reactor.plugin import PluginFailedException
from atomic_reactor.build import InsideBuilder, BuildResult
from atomic_reactor.util import ImageName, CommandResult
from atomic_reactor.inner import DockerBuildWorkflow
from tests.docker_mock import mock_docker
from flexmock import flexmock
import pytest
from tests.constants import MOCK_SOURCE
class MockDockerTasker(object):
def inspect_image(self, name):
return {}
def build_image_from_path(self):
return True
class X(object):
pass
class MockInsideBuilder(object):
def __init__(self, failed=False):
self.tasker = MockDockerTasker()
self.base_image = ImageName(repo='Fedora', tag='22')
self.image_id = 'asd'
self.image = 'image'
self.failed = failed
self.df_path = 'some'
self.df_dir = 'some'
def simplegen(x, y):
yield "some\u2018".encode('utf-8')
flexmock(self.tasker, build_image_from_path=simplegen)
@property
def source(self):
result = X()
setattr(result, 'dockerfile_path', '/')
setattr(result, 'path', '/tmp')
return result
def pull_base_image(self, source_registry, insecure=False):
pass
def get_built_image_info(self):
return {'Id': 'some'}
def inspect_built_image(self):
return None
def ensure_not_built(self):
pass
@pytest.mark.parametrize('is_failed', [
True,
False,
])
def test_build(is_failed):
"""
tests docker build api plugin working
"""
flexmock(DockerfileParser, content='df_content')
mock_docker()
fake_builder = MockInsideBuilder()
flexmock(InsideBuilder).new_instances(fake_builder)
workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
flexmock(CommandResult).should_receive('is_failed').and_return(is_failed)
if is_failed:
flexmock(CommandResult, error_detail="built error")
if is_failed:
with pytest.raises(PluginFailedException):
workflow.build_docker_image()
else:
workflow.build_docker_image()
assert isinstance(workflow.buildstep_result['docker_api'], BuildResult)
assert workflow.build_result
assert workflow.build_result.is_failed() == is_failed
|
[
"atomic_reactor.util.ImageName",
"flexmock.flexmock",
"tests.docker_mock.mock_docker",
"atomic_reactor.inner.DockerBuildWorkflow",
"pytest.raises",
"pytest.mark.parametrize"
] |
[((1671, 1722), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_failed"""', '[True, False]'], {}), "('is_failed', [True, False])\n", (1694, 1722), False, 'import pytest\n'), ((1823, 1871), 'flexmock.flexmock', 'flexmock', (['DockerfileParser'], {'content': '"""df_content"""'}), "(DockerfileParser, content='df_content')\n", (1831, 1871), False, 'from flexmock import flexmock\n'), ((1876, 1889), 'tests.docker_mock.mock_docker', 'mock_docker', ([], {}), '()\n', (1887, 1889), False, 'from tests.docker_mock import mock_docker\n'), ((2001, 2047), 'atomic_reactor.inner.DockerBuildWorkflow', 'DockerBuildWorkflow', (['MOCK_SOURCE', '"""test-image"""'], {}), "(MOCK_SOURCE, 'test-image')\n", (2020, 2047), False, 'from atomic_reactor.inner import DockerBuildWorkflow\n'), ((932, 966), 'atomic_reactor.util.ImageName', 'ImageName', ([], {'repo': '"""Fedora"""', 'tag': '"""22"""'}), "(repo='Fedora', tag='22')\n", (941, 966), False, 'from atomic_reactor.util import ImageName, CommandResult\n'), ((1199, 1253), 'flexmock.flexmock', 'flexmock', (['self.tasker'], {'build_image_from_path': 'simplegen'}), '(self.tasker, build_image_from_path=simplegen)\n', (1207, 1253), False, 'from flexmock import flexmock\n'), ((2152, 2203), 'flexmock.flexmock', 'flexmock', (['CommandResult'], {'error_detail': '"""built error"""'}), "(CommandResult, error_detail='built error')\n", (2160, 2203), False, 'from flexmock import flexmock\n'), ((1933, 1956), 'flexmock.flexmock', 'flexmock', (['InsideBuilder'], {}), '(InsideBuilder)\n', (1941, 1956), False, 'from flexmock import flexmock\n'), ((2236, 2272), 'pytest.raises', 'pytest.raises', (['PluginFailedException'], {}), '(PluginFailedException)\n', (2249, 2272), False, 'import pytest\n'), ((2052, 2075), 'flexmock.flexmock', 'flexmock', (['CommandResult'], {}), '(CommandResult)\n', (2060, 2075), False, 'from flexmock import flexmock\n')]
|
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import mindspore
import mindspore.nn as nn
import mindspore.ops as P
def conv(in_planes, out_planes, kernel_size=3, stride=1, dilation=1, isReLU=True):
if isReLU:
return nn.SequentialCell(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2, has_bias=True, pad_mode="pad"),
nn.LeakyReLU(0.1)
)
return nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2, has_bias=True, pad_mode="pad")
def upsample2d_as(inputs, target_as):
_, _, h1, w1 = P.Shape()(target_as)
_, _, h2, _ = P.Shape()(inputs)
resize = (h1 + 0.0) / (h2 + 0.0)
return P.ResizeBilinear((h1, w1))(inputs) * resize
class FeatureExtractor(nn.Cell):
'''Feature extract network'''
def __init__(self, num_chs):
super(FeatureExtractor, self).__init__()
self.num_chs = num_chs
self.convs = nn.CellList()
for _, (ch_in, ch_out) in enumerate(zip(num_chs[:-1], num_chs[1:])):
layer = nn.SequentialCell(
conv(ch_in, ch_out, stride=2),
conv(ch_out, ch_out)
)
self.convs.append(layer)
def construct(self, x):
feature_pyramid = []
feature_pyramid_tmp = []
for _conv in self.convs:
x = _conv(x)
feature_pyramid_tmp.append(x)
feature_pyramid.append(feature_pyramid_tmp[5])
feature_pyramid.append(feature_pyramid_tmp[4])
feature_pyramid.append(feature_pyramid_tmp[3])
feature_pyramid.append(feature_pyramid_tmp[2])
feature_pyramid.append(feature_pyramid_tmp[1])
feature_pyramid.append(feature_pyramid_tmp[0])
return feature_pyramid
# Warping layer ---------------------------------
def get_grid(x):
batch_size, height, width, _ = P.Shape()(x)
tmp1 = nn.Range(batch_size)()
tmp2 = nn.Range(height)()
tmp3 = nn.Range(width)()
inputs = (tmp1, tmp2, tmp3)
Bg, Yg, Xg = P.Meshgrid(indexing='ij')(inputs)
return Bg, Yg, Xg
def nearest_warp(x, flow):
grid_b, grid_y, grid_x = get_grid(x)
flow = flow.astype("Int32")
warped_gy = P.Add()(grid_y, flow[:, :, :, 1])
warped_gx = P.Add()(grid_x, flow[:, :, :, 0])
_, h, w, _ = P.Shape()(x)
warped_gy = mindspore.ops.clip_by_value(warped_gy, 0, h-1)
warped_gx = mindspore.ops.clip_by_value(warped_gx, 0, w-1)
warped_indices = P.Stack(3)([grid_b, warped_gy, warped_gx])
warped_x = P.GatherNd()(x, warped_indices)
return warped_x
def bilinear_warp(x, flow):
_, h, w, _ = P.Shape()(x)
grid_b, grid_y, grid_x = get_grid(x)
grid_b = grid_b.astype("float32")
grid_y = grid_y.astype("float32")
grid_x = grid_x.astype("float32")
temp1 = P.Unstack(-1)(flow)
fx = temp1[0]
fy = temp1[1]
fx_0 = P.Floor()(fx)
fx_1 = fx_0+1
fy_0 = P.Floor()(fy)
fy_1 = fy_0+1
# warping indices
h_lim = h-1
w_lim = w-1
gy_0 = mindspore.ops.clip_by_value(grid_y + fy_0, 0., h_lim)
gy_1 = mindspore.ops.clip_by_value(grid_y + fy_1, 0., h_lim)
gx_0 = mindspore.ops.clip_by_value(grid_x + fx_0, 0., w_lim)
gx_1 = mindspore.ops.clip_by_value(grid_x + fx_1, 0., w_lim)
g_00 = P.Stack(3)([grid_b, gy_0, gx_0]).astype("Int32")
g_01 = P.Stack(3)([grid_b, gy_0, gx_1]).astype("Int32")
g_10 = P.Stack(3)([grid_b, gy_1, gx_0]).astype("Int32")
g_11 = P.Stack(3)([grid_b, gy_1, gx_1]).astype("Int32")
# gather contents
x_00 = P.GatherNd()(x, g_00)
x_01 = P.GatherNd()(x, g_01)
x_10 = P.GatherNd()(x, g_10)
x_11 = P.GatherNd()(x, g_11)
# coefficients
c_00 = P.ExpandDims()((fy_1 - fy) * (fx_1 - fx), 3)
c_01 = P.ExpandDims()((fy_1 - fy) * (fx - fx_0), 3)
c_10 = P.ExpandDims()((fy - fy_0) * (fx_1 - fx), 3)
c_11 = P.ExpandDims()((fy - fy_0) * (fx - fx_0), 3)
return c_00 * x_00 + c_01 * x_01 + c_10 * x_10 + c_11 * x_11
class WarpingLayer(nn.Cell):
'''define warplayer'''
def __init__(self, warp_type='nearest'):
super(WarpingLayer, self).__init__()
self.warp = warp_type
def construct(self, x, flow):
x = mindspore.ops.Transpose()(x, (0, 2, 3, 1))
flow = mindspore.ops.Transpose()(flow, (0, 2, 3, 1))
if self.warp == 'nearest':
x_warped = nearest_warp(x, flow)
else:
x_warped = bilinear_warp(x, flow)
x_warped = mindspore.ops.Transpose()(x_warped, (0, 3, 1, 2))
return x_warped
class OpticalFlowEstimator(nn.Cell):
'''define OpticalFlowEstimator'''
def __init__(self, ch_in):
super(OpticalFlowEstimator, self).__init__()
self.convs = nn.Sequential(
conv(ch_in, 128),
conv(128, 128),
conv(128, 96),
conv(96, 64),
conv(64, 32)
)
self.conv_last = conv(32, 2, isReLU=False)
def construct(self, x):
x_intm = self.convs(x)
return x_intm, self.conv_last(x_intm)
class FlowEstimatorDense(nn.Cell):
'''define FlowEstimator network'''
def __init__(self, ch_in):
super(FlowEstimatorDense, self).__init__()
self.conv1 = conv(ch_in, 128)
self.conv2 = conv(ch_in + 128, 128)
self.conv3 = conv(ch_in + 256, 96)
self.conv4 = conv(ch_in + 352, 64)
self.conv5 = conv(ch_in + 416, 32)
self.conv_last = conv(ch_in + 448, 2, isReLU=False)
self.concat = P.Concat(1)
def construct(self, x):
x1 = self.concat([self.conv1(x), x])
x2 = self.concat([self.conv2(x1), x1])
x3 = self.concat([self.conv3(x2), x2])
x4 = self.concat([self.conv4(x3), x3])
x5 = self.concat([self.conv5(x4), x4])
x_out = self.conv_last(x5)
return x5, x_out
class ContextNetwork(nn.Cell):
'''context network'''
def __init__(self, ch_in):
super(ContextNetwork, self).__init__()
self.convs = nn.SequentialCell(
conv(ch_in, 128, 3, 1, 1),
conv(128, 128, 3, 1, 2),
conv(128, 128, 3, 1, 4),
conv(128, 96, 3, 1, 8),
conv(96, 64, 3, 1, 16),
conv(64, 32, 3, 1, 1),
conv(32, 2, isReLU=False)
)
def construct(self, x):
return self.convs(x)
|
[
"mindspore.ops.Concat",
"mindspore.nn.Range",
"mindspore.ops.Floor",
"mindspore.ops.ExpandDims",
"mindspore.nn.Conv2d",
"mindspore.nn.LeakyReLU",
"mindspore.ops.Unstack",
"mindspore.ops.ResizeBilinear",
"mindspore.ops.clip_by_value",
"mindspore.ops.Shape",
"mindspore.nn.CellList",
"mindspore.ops.Add",
"mindspore.ops.GatherNd",
"mindspore.ops.Stack",
"mindspore.ops.Meshgrid",
"mindspore.ops.Transpose"
] |
[((1125, 1299), 'mindspore.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'dilation': 'dilation', 'padding': '((kernel_size - 1) * dilation // 2)', 'has_bias': '(True)', 'pad_mode': '"""pad"""'}), "(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n dilation=dilation, padding=(kernel_size - 1) * dilation // 2, has_bias=\n True, pad_mode='pad')\n", (1134, 1299), True, 'import mindspore.nn as nn\n'), ((3106, 3154), 'mindspore.ops.clip_by_value', 'mindspore.ops.clip_by_value', (['warped_gy', '(0)', '(h - 1)'], {}), '(warped_gy, 0, h - 1)\n', (3133, 3154), False, 'import mindspore\n'), ((3169, 3217), 'mindspore.ops.clip_by_value', 'mindspore.ops.clip_by_value', (['warped_gx', '(0)', '(w - 1)'], {}), '(warped_gx, 0, w - 1)\n', (3196, 3217), False, 'import mindspore\n'), ((3784, 3838), 'mindspore.ops.clip_by_value', 'mindspore.ops.clip_by_value', (['(grid_y + fy_0)', '(0.0)', 'h_lim'], {}), '(grid_y + fy_0, 0.0, h_lim)\n', (3811, 3838), False, 'import mindspore\n'), ((3849, 3903), 'mindspore.ops.clip_by_value', 'mindspore.ops.clip_by_value', (['(grid_y + fy_1)', '(0.0)', 'h_lim'], {}), '(grid_y + fy_1, 0.0, h_lim)\n', (3876, 3903), False, 'import mindspore\n'), ((3914, 3968), 'mindspore.ops.clip_by_value', 'mindspore.ops.clip_by_value', (['(grid_x + fx_0)', '(0.0)', 'w_lim'], {}), '(grid_x + fx_0, 0.0, w_lim)\n', (3941, 3968), False, 'import mindspore\n'), ((3979, 4033), 'mindspore.ops.clip_by_value', 'mindspore.ops.clip_by_value', (['(grid_x + fx_1)', '(0.0)', 'w_lim'], {}), '(grid_x + fx_1, 0.0, w_lim)\n', (4006, 4033), False, 'import mindspore\n'), ((1373, 1382), 'mindspore.ops.Shape', 'P.Shape', ([], {}), '()\n', (1380, 1382), True, 'import mindspore.ops as P\n'), ((1412, 1421), 'mindspore.ops.Shape', 'P.Shape', ([], {}), '()\n', (1419, 1421), True, 'import mindspore.ops as P\n'), ((1725, 1738), 'mindspore.nn.CellList', 'nn.CellList', ([], {}), '()\n', (1736, 1738), True, 'import mindspore.nn as 
nn\n'), ((2647, 2656), 'mindspore.ops.Shape', 'P.Shape', ([], {}), '()\n', (2654, 2656), True, 'import mindspore.ops as P\n'), ((2671, 2691), 'mindspore.nn.Range', 'nn.Range', (['batch_size'], {}), '(batch_size)\n', (2679, 2691), True, 'import mindspore.nn as nn\n'), ((2705, 2721), 'mindspore.nn.Range', 'nn.Range', (['height'], {}), '(height)\n', (2713, 2721), True, 'import mindspore.nn as nn\n'), ((2735, 2750), 'mindspore.nn.Range', 'nn.Range', (['width'], {}), '(width)\n', (2743, 2750), True, 'import mindspore.nn as nn\n'), ((2802, 2827), 'mindspore.ops.Meshgrid', 'P.Meshgrid', ([], {'indexing': '"""ij"""'}), "(indexing='ij')\n", (2812, 2827), True, 'import mindspore.ops as P\n'), ((2976, 2983), 'mindspore.ops.Add', 'P.Add', ([], {}), '()\n', (2981, 2983), True, 'import mindspore.ops as P\n'), ((3026, 3033), 'mindspore.ops.Add', 'P.Add', ([], {}), '()\n', (3031, 3033), True, 'import mindspore.ops as P\n'), ((3077, 3086), 'mindspore.ops.Shape', 'P.Shape', ([], {}), '()\n', (3084, 3086), True, 'import mindspore.ops as P\n'), ((3237, 3247), 'mindspore.ops.Stack', 'P.Stack', (['(3)'], {}), '(3)\n', (3244, 3247), True, 'import mindspore.ops as P\n'), ((3296, 3308), 'mindspore.ops.GatherNd', 'P.GatherNd', ([], {}), '()\n', (3306, 3308), True, 'import mindspore.ops as P\n'), ((3394, 3403), 'mindspore.ops.Shape', 'P.Shape', ([], {}), '()\n', (3401, 3403), True, 'import mindspore.ops as P\n'), ((3575, 3588), 'mindspore.ops.Unstack', 'P.Unstack', (['(-1)'], {}), '(-1)\n', (3584, 3588), True, 'import mindspore.ops as P\n'), ((3642, 3651), 'mindspore.ops.Floor', 'P.Floor', ([], {}), '()\n', (3649, 3651), True, 'import mindspore.ops as P\n'), ((3685, 3694), 'mindspore.ops.Floor', 'P.Floor', ([], {}), '()\n', (3692, 3694), True, 'import mindspore.ops as P\n'), ((4308, 4320), 'mindspore.ops.GatherNd', 'P.GatherNd', ([], {}), '()\n', (4318, 4320), True, 'import mindspore.ops as P\n'), ((4341, 4353), 'mindspore.ops.GatherNd', 'P.GatherNd', ([], {}), '()\n', (4351, 4353), True, 
'import mindspore.ops as P\n'), ((4374, 4386), 'mindspore.ops.GatherNd', 'P.GatherNd', ([], {}), '()\n', (4384, 4386), True, 'import mindspore.ops as P\n'), ((4407, 4419), 'mindspore.ops.GatherNd', 'P.GatherNd', ([], {}), '()\n', (4417, 4419), True, 'import mindspore.ops as P\n'), ((4460, 4474), 'mindspore.ops.ExpandDims', 'P.ExpandDims', ([], {}), '()\n', (4472, 4474), True, 'import mindspore.ops as P\n'), ((4516, 4530), 'mindspore.ops.ExpandDims', 'P.ExpandDims', ([], {}), '()\n', (4528, 4530), True, 'import mindspore.ops as P\n'), ((4572, 4586), 'mindspore.ops.ExpandDims', 'P.ExpandDims', ([], {}), '()\n', (4584, 4586), True, 'import mindspore.ops as P\n'), ((4628, 4642), 'mindspore.ops.ExpandDims', 'P.ExpandDims', ([], {}), '()\n', (4640, 4642), True, 'import mindspore.ops as P\n'), ((6252, 6263), 'mindspore.ops.Concat', 'P.Concat', (['(1)'], {}), '(1)\n', (6260, 6263), True, 'import mindspore.ops as P\n'), ((883, 1057), 'mindspore.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'dilation': 'dilation', 'padding': '((kernel_size - 1) * dilation // 2)', 'has_bias': '(True)', 'pad_mode': '"""pad"""'}), "(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n dilation=dilation, padding=(kernel_size - 1) * dilation // 2, has_bias=\n True, pad_mode='pad')\n", (892, 1057), True, 'import mindspore.nn as nn\n'), ((1086, 1103), 'mindspore.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (1098, 1103), True, 'import mindspore.nn as nn\n'), ((1478, 1504), 'mindspore.ops.ResizeBilinear', 'P.ResizeBilinear', (['(h1, w1)'], {}), '((h1, w1))\n', (1494, 1504), True, 'import mindspore.ops as P\n'), ((4963, 4988), 'mindspore.ops.Transpose', 'mindspore.ops.Transpose', ([], {}), '()\n', (4986, 4988), False, 'import mindspore\n'), ((5021, 5046), 'mindspore.ops.Transpose', 'mindspore.ops.Transpose', ([], {}), '()\n', (5044, 5046), False, 'import mindspore\n'), ((5226, 5251), 'mindspore.ops.Transpose', 
'mindspore.ops.Transpose', ([], {}), '()\n', (5249, 5251), False, 'import mindspore\n'), ((4045, 4055), 'mindspore.ops.Stack', 'P.Stack', (['(3)'], {}), '(3)\n', (4052, 4055), True, 'import mindspore.ops as P\n'), ((4105, 4115), 'mindspore.ops.Stack', 'P.Stack', (['(3)'], {}), '(3)\n', (4112, 4115), True, 'import mindspore.ops as P\n'), ((4165, 4175), 'mindspore.ops.Stack', 'P.Stack', (['(3)'], {}), '(3)\n', (4172, 4175), True, 'import mindspore.ops as P\n'), ((4225, 4235), 'mindspore.ops.Stack', 'P.Stack', (['(3)'], {}), '(3)\n', (4232, 4235), True, 'import mindspore.ops as P\n')]
|
import time
import random
import json
import re
import requests
import lxml.html
RATINGS = ['Funny', 'Confusing', 'Unconvincing', 'Informative', 'Fascinating',
'Persuasive', 'Obnoxious', 'Courageous', 'Beautiful', 'Longwinded',
'Inspiring', 'Ingenious', 'Jaw-dropping', 'OK']
LECTION_TOPICS_REGEX = re.compile(r'<meta content="(.+)" name="keywords" />')
lections = []
def proccess_lection(lection_id):
global lections
try:
ted_url = 'http://www.ted.com/talks/{}'
response = requests.get(ted_url.format(lection_id))
assert response.status_code != 429
html = response.text
except Exception:
print('Trying again for', lection_id)
time.sleep(2 + random.randint(1, 10))
proccess_lection(lection_id)
return
lection_page = lxml.html.document_fromstring(html)
lection = {'id': lection_id, 'topics': [], 'ratings': {}}
# Получим рейтинг лекции по разным параметрам
for rating in RATINGS:
# Создадим регулярное выражение для поиска рейтинга
regex = re.compile(r'"name":"{}","count":(\d+)'.format(rating))
match = re.search(regex, lection_page.text_content())
if match is None:
print('No rating for', rating, 'in', lection_id)
return
rating_count = match.group(1)
lection['ratings'][rating] = int(rating_count)
# Получаем список тем лекции
match = re.search(LECTION_TOPICS_REGEX, html)
if match is None:
print('No topics for', lection_id)
return
topics_list = match.group(1)
for topic in topics_list.split(', '):
# Убираем ненужные категории
if topic not in ['TED', 'talks', 'TED Conference']:
lection['topics'].append(topic)
lections.append(lection)
threads = []
# Существует не более 2500 лекций
for i in range(1, 2500):
proccess_lection(i)
if i % 10 == 0:
print(i, 'lections done.')
print(len(lections))
result_file = open('data/lections.json', 'w')
result_file.write(json.dumps(lections))
result_file.close()
|
[
"re.search",
"random.randint",
"json.dumps",
"re.compile"
] |
[((322, 375), 're.compile', 're.compile', (['"""<meta content="(.+)" name="keywords" />"""'], {}), '(\'<meta content="(.+)" name="keywords" />\')\n', (332, 375), False, 'import re\n'), ((1439, 1476), 're.search', 're.search', (['LECTION_TOPICS_REGEX', 'html'], {}), '(LECTION_TOPICS_REGEX, html)\n', (1448, 1476), False, 'import re\n'), ((2042, 2062), 'json.dumps', 'json.dumps', (['lections'], {}), '(lections)\n', (2052, 2062), False, 'import json\n'), ((728, 749), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (742, 749), False, 'import random\n')]
|
import sys, random, math, pygame
from pygame import locals
from datetime import datetime, time, date
# main program begins
pygame.init()
screen = pygame.display.set_mode((600, 500))
pygame.display.set_caption("Analog Clock")
font = pygame.font.Font(None, 36)
orange = 220, 180, 0
white = 255, 255, 255
yellow = 255, 255, 0
pink = 255, 100, 100
pos_x = 300
pos_y = 250
radius = 250
angle = 360
def print_text(font, x, y, text, color=white):
imgText = font.render(text, True, color)
screen.blit(imgText, (x, y))
def wrap_angle(angle):
return abs(angle % 360)
# repeating loop
while True:
for event in pygame.event.get():
if event.type == locals.QUIT:
sys.exit()
keys = pygame.key.get_pressed()
if keys[locals.K_ESCAPE]:
sys.exit()
screen.fill((0, 0, 100))
# draw one step around the circle
pygame.draw.circle(screen, white, (pos_x, pos_y), radius, 6)
# draw the clock numbers 1-12
for n in range(1, 13):
angle = math.radians(n * (360 / 12) - 90)
x = math.cos(angle) * (radius - 50) - 10
y = math.sin(angle) * (radius - 50) - 10
print_text(font, pos_x + x, pos_y + y, str(n))
# get the time of day
today = datetime.today()
hours = today.hour % 12
minutes = today.minute
seconds = today.second
# draw the hours hand
hour_angle = wrap_angle(hours * (360 / 12) - 90)
hour_angle = math.radians(hour_angle)
hour_x = math.cos(hour_angle) * (radius - 80)
hour_y = math.sin(hour_angle) * (radius - 80)
target = (pos_x + hour_x, pos_y + hour_y)
pygame.draw.line(screen, pink, (pos_x, pos_y), target, 25)
# draw the minutes hand
min_angle = wrap_angle(minutes * (360 / 60) - 90)
min_angle = math.radians(min_angle)
min_x = math.cos(min_angle) * (radius - 60)
min_y = math.sin(min_angle) * (radius - 60)
target = (pos_x + min_x, pos_y + min_y)
pygame.draw.line(screen, orange, (pos_x, pos_y), target, 12)
# draw the seconds hand
sec_angle = wrap_angle(seconds * (360 / 60) - 90)
sec_angle = math.radians(sec_angle)
sec_x = math.cos(sec_angle) * (radius - 40)
sec_y = math.sin(sec_angle) * (radius - 40)
target = (pos_x + sec_x, pos_y + sec_y)
pygame.draw.line(screen, yellow, (pos_x, pos_y), target, 6)
# cover the center
pygame.draw.circle(screen, white, (pos_x, pos_y), 20)
print_text(font, 0, 0, str(hours) + ":" + str(minutes) + ":" + str(seconds))
pygame.display.update()
|
[
"pygame.draw.line",
"pygame.draw.circle",
"datetime.datetime.today",
"pygame.event.get",
"pygame.display.set_mode",
"math.radians",
"pygame.init",
"math.sin",
"pygame.display.update",
"math.cos",
"pygame.font.Font",
"pygame.display.set_caption",
"pygame.key.get_pressed",
"sys.exit"
] |
[((124, 137), 'pygame.init', 'pygame.init', ([], {}), '()\n', (135, 137), False, 'import sys, random, math, pygame\n'), ((147, 182), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(600, 500)'], {}), '((600, 500))\n', (170, 182), False, 'import sys, random, math, pygame\n'), ((183, 225), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Analog Clock"""'], {}), "('Analog Clock')\n", (209, 225), False, 'import sys, random, math, pygame\n'), ((234, 260), 'pygame.font.Font', 'pygame.font.Font', (['None', '(36)'], {}), '(None, 36)\n', (250, 260), False, 'import sys, random, math, pygame\n'), ((622, 640), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (638, 640), False, 'import sys, random, math, pygame\n'), ((714, 738), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (736, 738), False, 'import sys, random, math, pygame\n'), ((860, 920), 'pygame.draw.circle', 'pygame.draw.circle', (['screen', 'white', '(pos_x, pos_y)', 'radius', '(6)'], {}), '(screen, white, (pos_x, pos_y), radius, 6)\n', (878, 920), False, 'import sys, random, math, pygame\n'), ((1241, 1257), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (1255, 1257), False, 'from datetime import datetime, time, date\n'), ((1437, 1461), 'math.radians', 'math.radians', (['hour_angle'], {}), '(hour_angle)\n', (1449, 1461), False, 'import sys, random, math, pygame\n'), ((1612, 1670), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'pink', '(pos_x, pos_y)', 'target', '(25)'], {}), '(screen, pink, (pos_x, pos_y), target, 25)\n', (1628, 1670), False, 'import sys, random, math, pygame\n'), ((1770, 1793), 'math.radians', 'math.radians', (['min_angle'], {}), '(min_angle)\n', (1782, 1793), False, 'import sys, random, math, pygame\n'), ((1938, 1998), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'orange', '(pos_x, pos_y)', 'target', '(12)'], {}), '(screen, orange, (pos_x, pos_y), target, 12)\n', (1954, 1998), False, 'import sys, random, math, 
pygame\n'), ((2098, 2121), 'math.radians', 'math.radians', (['sec_angle'], {}), '(sec_angle)\n', (2110, 2121), False, 'import sys, random, math, pygame\n'), ((2266, 2325), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'yellow', '(pos_x, pos_y)', 'target', '(6)'], {}), '(screen, yellow, (pos_x, pos_y), target, 6)\n', (2282, 2325), False, 'import sys, random, math, pygame\n'), ((2354, 2407), 'pygame.draw.circle', 'pygame.draw.circle', (['screen', 'white', '(pos_x, pos_y)', '(20)'], {}), '(screen, white, (pos_x, pos_y), 20)\n', (2372, 2407), False, 'import sys, random, math, pygame\n'), ((2495, 2518), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (2516, 2518), False, 'import sys, random, math, pygame\n'), ((777, 787), 'sys.exit', 'sys.exit', ([], {}), '()\n', (785, 787), False, 'import sys, random, math, pygame\n'), ((1003, 1036), 'math.radians', 'math.radians', (['(n * (360 / 12) - 90)'], {}), '(n * (360 / 12) - 90)\n', (1015, 1036), False, 'import sys, random, math, pygame\n'), ((1475, 1495), 'math.cos', 'math.cos', (['hour_angle'], {}), '(hour_angle)\n', (1483, 1495), False, 'import sys, random, math, pygame\n'), ((1525, 1545), 'math.sin', 'math.sin', (['hour_angle'], {}), '(hour_angle)\n', (1533, 1545), False, 'import sys, random, math, pygame\n'), ((1806, 1825), 'math.cos', 'math.cos', (['min_angle'], {}), '(min_angle)\n', (1814, 1825), False, 'import sys, random, math, pygame\n'), ((1854, 1873), 'math.sin', 'math.sin', (['min_angle'], {}), '(min_angle)\n', (1862, 1873), False, 'import sys, random, math, pygame\n'), ((2134, 2153), 'math.cos', 'math.cos', (['sec_angle'], {}), '(sec_angle)\n', (2142, 2153), False, 'import sys, random, math, pygame\n'), ((2182, 2201), 'math.sin', 'math.sin', (['sec_angle'], {}), '(sec_angle)\n', (2190, 2201), False, 'import sys, random, math, pygame\n'), ((692, 702), 'sys.exit', 'sys.exit', ([], {}), '()\n', (700, 702), False, 'import sys, random, math, pygame\n'), ((1053, 1068), 'math.cos', 'math.cos', 
(['angle'], {}), '(angle)\n', (1061, 1068), False, 'import sys, random, math, pygame\n'), ((1106, 1121), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (1114, 1121), False, 'import sys, random, math, pygame\n')]
|
import pytest
from _cob import mymodels
@pytest.fixture
def people():
people = []
for _ in range(10):
person = mymodels.Person(first_name="First", last_name="Last")
people.append(person)
mymodels.db.session.add(person)
mymodels.db.session.commit()
yield people
for person in people:
mymodels.db.session.delete(person)
mymodels.db.session.commit()
def test_person_model_and_view(webapp, people):
assert webapp.get("/index/list_models") == [{"id": person.id} for person in people]
|
[
"_cob.mymodels.db.session.commit",
"_cob.mymodels.db.session.delete",
"_cob.mymodels.Person",
"_cob.mymodels.db.session.add"
] |
[((257, 285), '_cob.mymodels.db.session.commit', 'mymodels.db.session.commit', ([], {}), '()\n', (283, 285), False, 'from _cob import mymodels\n'), ((376, 404), '_cob.mymodels.db.session.commit', 'mymodels.db.session.commit', ([], {}), '()\n', (402, 404), False, 'from _cob import mymodels\n'), ((129, 182), '_cob.mymodels.Person', 'mymodels.Person', ([], {'first_name': '"""First"""', 'last_name': '"""Last"""'}), "(first_name='First', last_name='Last')\n", (144, 182), False, 'from _cob import mymodels\n'), ((221, 252), '_cob.mymodels.db.session.add', 'mymodels.db.session.add', (['person'], {}), '(person)\n', (244, 252), False, 'from _cob import mymodels\n'), ((337, 371), '_cob.mymodels.db.session.delete', 'mymodels.db.session.delete', (['person'], {}), '(person)\n', (363, 371), False, 'from _cob import mymodels\n')]
|
'''
main.py
Created by <NAME> on 2020
Copyright © 2020 <NAME>. All rights reserved.
'''
import sys
a, b= map(int, sys.stdin.readline().rstrip().split(' '))
print(a * b)
|
[
"sys.stdin.readline"
] |
[((133, 153), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (151, 153), False, 'import sys\n')]
|
#!/usr/bin/env python
"""
Fraunhofer IML
Department Automation and Embedded Systems
Tabsize : 4
Charset : UTF-8
"""
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
from abc import ABCMeta, abstractmethod
import xml.etree.ElementTree
from MARSVertex import MARSVertex, MARSFootprintType, DEFAULT_FOOTPRINT_RESOLUTION
from MARSEdge import MARSEdge
from mars_common.Id import Id, IdType
import rospy
class TopologyParser():
"""Abstract class for parsing topologies.
Abstract class for parsing topologies.
Attributes:
__id: Current id for the topology entity that will be created.
After creating an entity the id must be increased.
_mars_vertices: Contains all mars vertices which has to be started
in a dictionary.
key = id of the vertex, value = MARSVertex
_mars_edges: Contains all mars edges which has to be started
in a dictionary.
key = id of the edge, value = MARSEdge
"""
__metaclass__ = ABCMeta
def __init__(self):
self.__id = 0
self._mars_vertices = dict()
self._mars_edges = dict()
@abstractmethod
def parse_file(self, file_path):
"""Reads a topology file an creates mars edge and vertex entities.
Reads a topology file an creates mars edge and vertex entities.
This entities can be used to start topology nodes (edges and vertices).
Args:
file_path: path to the file on the system as a string.
Returns:
Returns "True" if the file was successfully opened and parsed!
Raises:
"""
pass
def get_mars_topology_vertices(self):
"""Return all MARSVertex objects.
Returns all MARSVertex objects in a dictionary.
Args:
Returns:
Returns a dictionary with all MARSVertex objects.
Raises:
"""
return self._mars_vertices
def get_mars_topology_edges(self):
"""Return all MARSEdge objects.
Returns all MARSEdge objects in a dictionary.
Args:
Returns:
Returns a dictionary with all MARSEdge objects.
Raises:
"""
return self._mars_edges
def _create_mars_vertex(
self, vertex_name, x_position, y_position, footprint_type, footprint_radius,
footprint_resolution=DEFAULT_FOOTPRINT_RESOLUTION, uuid=None,
uuid_type=IdType.ID_TYPE_STRING_UUID,
footprint_x=None, footprint_y=None):
"""Creates an object of type MARSVertex.
Creates an object of type MARSVertex and sets an unique id!
Args:
vertex_name: Name of the vertex.
x_position: X-Position of the vertex.
y_position: Y-Position of the vertex.
uuid: A string based uuid or name.
uuid_tpye: Type of the given uuid.
Returns:
Return the created MARSVertex object.
Raises:
"""
if uuid is not None:
mars_vertex = MARSVertex(Id(uuid, uuid_type, description=vertex_name))
else:
mars_vertex = MARSVertex(
Id(self.__id, IdType.ID_TYPE_STRING_NAME, description=vertex_name))
mars_vertex.set_name(vertex_name)
mars_vertex.set_position(x_position, y_position)
if footprint_x and footprint_y:
mars_vertex.add_footprint(footprint_x, footprint_y)
else:
if (footprint_type == MARSFootprintType.MARS_FOOTPRINT_TYPE_SQUARE):
mars_vertex.calc_square_footprint(footprint_radius)
elif (footprint_type == MARSFootprintType.MARS_FOOTPRINT_TYPE_CIRCLE):
mars_vertex.calc_circle_footprint(footprint_radius, footprint_resolution)
else:
rospy.logwarn(
"[TopologyParser][_create_mars_vertex] Unknown footprint type for creating footprint was given. Continue with calculating circle footprint.")
mars_vertex.calc_circle_footprint(footprint_radius, footprint_resolution)
self.__id = self.__id + 1
return mars_vertex
def _create_mars_edge(self, edge_name, length, uuid=None,
footprint_x=None, footprint_y=None):
"""Creates an object of type MARSVertex.
Creates an object of type MARSVertex and sets an unique id!
Args:
edge_name: Name of the edge.
length: Length of the edge in meter (float).
max_velocity: Maximum allowed velocity on the edge on m/s (float).
uuid: A string based uuid (optional)
Returns:
Return the created MARSVertex object.
Raises:
"""
if uuid is not None:
mars_edge = MARSEdge(
Id(uuid, IdType.ID_TYPE_STRING_UUID), length=length)
else:
mars_edge = MARSEdge(
Id(self.__id, IdType.ID_TYPE_STRING_NAME), length=length)
mars_edge.set_name(edge_name)
if footprint_x and footprint_y:
mars_edge.add_footprint(footprint_x, footprint_y)
self.__id = self.__id + 1
return mars_edge
def print_parsed_topology_ros_debug(self):
self.__print_entity(self._mars_vertices)
self.__print_entity(self._mars_edges)
def __print_entity(self, entity_collection):
for entity_name in entity_collection.values():
rospy.logdebug(str(entity_name))
|
[
"rospy.logwarn",
"mars_common.Id.Id"
] |
[((3166, 3210), 'mars_common.Id.Id', 'Id', (['uuid', 'uuid_type'], {'description': 'vertex_name'}), '(uuid, uuid_type, description=vertex_name)\n', (3168, 3210), False, 'from mars_common.Id import Id, IdType\n'), ((3280, 3346), 'mars_common.Id.Id', 'Id', (['self.__id', 'IdType.ID_TYPE_STRING_NAME'], {'description': 'vertex_name'}), '(self.__id, IdType.ID_TYPE_STRING_NAME, description=vertex_name)\n', (3282, 3346), False, 'from mars_common.Id import Id, IdType\n'), ((4912, 4948), 'mars_common.Id.Id', 'Id', (['uuid', 'IdType.ID_TYPE_STRING_UUID'], {}), '(uuid, IdType.ID_TYPE_STRING_UUID)\n', (4914, 4948), False, 'from mars_common.Id import Id, IdType\n'), ((5029, 5070), 'mars_common.Id.Id', 'Id', (['self.__id', 'IdType.ID_TYPE_STRING_NAME'], {}), '(self.__id, IdType.ID_TYPE_STRING_NAME)\n', (5031, 5070), False, 'from mars_common.Id import Id, IdType\n'), ((3922, 4087), 'rospy.logwarn', 'rospy.logwarn', (['"""[TopologyParser][_create_mars_vertex] Unknown footprint type for creating footprint was given. Continue with calculating circle footprint."""'], {}), "(\n '[TopologyParser][_create_mars_vertex] Unknown footprint type for creating footprint was given. Continue with calculating circle footprint.'\n )\n", (3935, 4087), False, 'import rospy\n')]
|
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from collections import OrderedDict
import gc
from current_clamp import *
from current_clamp_features import extract_istep_features
from visualization.feature_annotations import feature_name_dict
from read_metadata import *
from file_io import load_current_step
# from pymysql import IntegrityError
import datajoint as dj
schema = dj.schema('yueqi_ephys', locals())
FIG_DIR = 'analysis_current_clamp/figures_plot_recording'
'''
class DjImportedFromDirectory(dj.Imported):
# Subclass of Imported. Initialize with data directory.
def __init__(self, directory=''):
self.directory = directory
super().__init__()
'''
@schema
class EphysExperimentsForAnalysis(dj.Manual):
definition = """
# Ephys experiments (excel files) for analysis
experiment: varchar(128) # excel files to use for analysis
---
project: varchar(128) # which project the data belongs to
use: enum('Yes', 'No') # whether to use this experiment
directory: varchar(256) # the parent project directory
"""
def insert_experiment(self, excel_file):
'''
Insert new sample ephys metadata from excel to datajoint tables
'''
entry_list = pd.read_excel(excel_file)[['experiment', 'project', 'use', 'directory']].dropna(how='any')
entry_list = entry_list.to_dict('records')
no_insert = True
for entry in entry_list:
if entry['use'] == 'No':
continue
self.insert1(row=entry, skip_duplicates=True)
no_insert = False
#print("Inserted: " + str(entry))
if no_insert:
print("No new entry inserted.")
return
@schema
class Animals(dj.Imported):
definition = """
# Sample metadata
-> EphysExperimentsForAnalysis
---
id: varchar(128) # organod ID (use date, but need better naming)
strain : varchar(128) # genetic strain
dob = null: date # date of birth
date = null: date # recording date
age = null: smallint # nunmber of days (date - dob)
slicetype: varchar(128) # what kind of slice prep
external: varchar(128) # external solution
internal: varchar(128) # internal solution
animal_comment = '': varchar(256) # general comments
"""
def _make_tuples(self, key):
ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
directory = os.path.expanduser(ephys_exp.pop('directory', None))
print('Populating for: ', key)
animal_info, _ = read_ephys_info_from_excel_2017(
os.path.join(directory, key['experiment'] + '.xlsx'))
key['id'] = animal_info['id']
key['strain'] = animal_info['strain']
if not pd.isnull(animal_info['DOB']): key['dob'] = animal_info['DOB']
if not pd.isnull(animal_info['age']): key['age'] = animal_info['age']
key['date'] = animal_info['date']
key['slicetype'] = animal_info['type']
key['external'] = animal_info['external']
key['internal'] = animal_info['internal']
if not pd.isnull(animal_info['comment']): key['animal_comment'] = animal_info['comment']
self.insert1(row=key)
return
@schema
class PatchCells(dj.Imported):
definition = """
# Patch clamp metadata for each cell
-> EphysExperimentsForAnalysis
cell: varchar(128) # cell id
---
rp = null: float # pipette resistance
cm_est = null: float # estimated Cm
ra_est = null: float # estimated Ra right after whole-cell mode
rm_est = null: float # estimated Rm
v_rest = null: float # resting membrane potential
fluor = '': varchar(128) # fluorescent label
fill = 'no': enum('yes', 'no', 'unknown', 'out') # wether the cell is biocytin filled. Out -- cell came out with pipette.
cell_external = '': varchar(128) # external if different from sample metadata
cell_internal = '': varchar(128) # internal if different from sample metadata
depth = '': varchar(128) # microns beneath slice surface
location = '': varchar(128) # spatial location
"""
def _make_tuples(self, key):
ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
directory = os.path.expanduser(ephys_exp.pop('directory', None))
print('Populating for: ', key)
_, metadata = read_ephys_info_from_excel_2017(
os.path.join(directory, key['experiment'] + '.xlsx'))
if 'params' in metadata.columns:
old_file = True
cell_info = parse_cell_info_2017_vertical(metadata)
else:
old_file = False
cell_info = parse_cell_info_2017(metadata)
for i, row in cell_info.iterrows():
newkey = {}
newkey['experiment'] = key['experiment']
newkey['cell'] = row['cell']
if not pd.isnull(row['Rp']): newkey['rp'] = row['Rp']
if not pd.isnull(row['Cm']): newkey['cm_est'] = row['Cm']
if not pd.isnull(row['Ra']): newkey['ra_est'] = row['Ra']
if not pd.isnull(row['Vrest']): newkey['v_rest'] = row['Vrest']
if not pd.isnull(row['depth']): newkey['depth'] = row['depth']
if not old_file:
if not pd.isnull(row['fluor']): newkey['fluor'] = row['fluor']
if not pd.isnull(row['Rm']): newkey['rm_est'] = row['Rm']
if not pd.isnull(row['external']): newkey['cell_external'] = row['external']
if not pd.isnull(row['internal']): newkey['cell_internal'] = row['internal']
if not pd.isnull(row['location']): newkey['location'] = row['location']
if not pd.isnull(row['fill']):
if row['fill'].lower() in ['yes', 'no', 'unknown', 'out']:
newkey['fill'] = row['fill'].lower()
else:
print('"fill" must be yes/no/unknown/out. ')
#print(newkey)
self.insert1(row=newkey)
return
@schema
class EphysRecordings(dj.Imported):
definition = """
# Patch clamp metadata for each recording file
-> EphysExperimentsForAnalysis
cell: varchar(128) # cell id
recording: varchar(128) # recording file name
---
clamp = null : enum('v', 'i') # voltage or current clamp
protocol = '' : varchar(128) # protocols such as gapfree, istep, etc
hold = null : smallint # holding current or voltage
ra_pre = null : smallint # estimated Ra before protocol
ra_post = null : smallint # estimated Ra after protocol
compensate = '' : varchar(128) # percentage of Ra compensation
gain = null : smallint # amplifier gain
filter = null : smallint # filter in kHz
start = null : smallint # current step starting current
step = null : smallint # step size of current injection
stim_strength = '' : varchar(128) # electrical/optical stimulation strength
stim_duration = null : smallint # duration of each stim pulse
stim_interval = null : smallint # interval between two consecutive pulses
response = '' : varchar(256) # what kind of reponse was observed
comment = '' : varchar(256) # general comments
"""
def _make_tuples(self, key):
ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
directory = os.path.expanduser(ephys_exp.pop('directory', None))
print('Populating for: ', key)
_, metadata = read_ephys_info_from_excel_2017(
os.path.join(directory, key['experiment'] + '.xlsx'))
patch_info = parse_patch_info_2017(metadata)
for i, row in patch_info.iterrows():
newkey = {}
newkey['experiment'] = key['experiment']
newkey['cell'] = row['cell']
newkey['recording'] = row['file']
if not pd.isnull(row['clamp']): newkey['clamp'] = row['clamp'].lower()
if not pd.isnull(row['protocol']): newkey['protocol'] = row['protocol']
if not pd.isnull(row['hold']): newkey['hold'] = row['hold']
if not pd.isnull(row['Ra-pre']):
if type(row['Ra-pre']) is str:
newkey['ra_pre'] = 100
else:
newkey['ra_pre'] = row['Ra-pre']
if not pd.isnull(row['Ra-post']):
if type(row['Ra-post']) is str:
newkey['ra_post'] = 100
else:
newkey['ra_post'] = row['Ra-post']
if not pd.isnull(row.get('compensate')): newkey['compensate'] = row['compensate']
if not pd.isnull(row['gain']): newkey['gain'] = row['gain']
if not pd.isnull(row['filter']): newkey['filter'] = row['filter']
if not pd.isnull(row.get('start')): newkey['start'] = row['start']
if not pd.isnull(row.get('step')): newkey['step'] = row['step']
if not pd.isnull(row.get('stim strength')): newkey['stim_strength'] = row['stim strength']
if not pd.isnull(row.get('stim duration')): newkey['stim_duration'] = row['stim duration']
if not pd.isnull(row.get('stim interval')): newkey['stim_interval'] = row['stim interval']
if not pd.isnull(row['response']): newkey['response'] = row['response']
if not pd.isnull(row.get('comment')): newkey['comment'] = row['comment']
self.insert1(row=newkey)
return
#TODO write a CurrentStepRecordings class and let APandIntrinsicProperties depend on it.
# currently APandIntrinsicProperties points to each experiment rather than each recording.
@schema
class CurrentStepTimeParams(dj.Manual):
definition = """
# Time window parameters for current injection (account for different protocol settings)
-> EphysExperimentsForAnalysis
---
istep_start: float # current injection starting time (s)
istep_end_1s: float # time after 1st second (s) -- use the 1st second for analysis
istep_end: float # current injection actual ending time (s)
istep_duration: float # current injection duration (s)
"""
def insert_params(self, excel_file):
'''
Insert paramters for current injection
'''
experiments = EphysExperimentsForAnalysis().fetch('experiment')
entry_list = pd.read_excel(excel_file)[['experiment', 'istep_start', 'istep_duration']]
entry_list = entry_list.dropna(how='any').to_dict('records')
no_insert = True
for entry in entry_list:
if not entry['experiment'] in experiments:
continue
entry['istep_end_1s'] = entry['istep_start'] + 1
entry['istep_end'] = entry['istep_start'] + entry['istep_duration']
if entry['istep_end'] < entry['istep_end_1s']:
entry['istep_end_1s'] = entry['istep_end']
self.insert1(row=entry, skip_duplicates=True)
no_insert = False
#print("Inserted: " + str(entry))
if no_insert:
print("No new entry inserted.")
return
@schema
class FeatureExtractionParams(dj.Lookup):
definition = """
# Parameters for AllenSDK action potential detection algorithm
params_id : int # unique id for parameter set
---
filter = 10 : float # cutoff frequency for 4-pole low-pass Bessel filter in kHz
dv_cutoff = 4 : float # minimum dV/dt to qualify as a spike in V/s (optional, default 20)
max_interval = 0.02 : float # maximum acceptable time between start of spike and time of peak in sec (optional, default 0.005)
min_height = 10 : float # minimum acceptable height from threshold to peak in mV (optional, default 2)
min_peak = -20 : float # minimum acceptable absolute peak level in mV (optional, default -30)
thresh_frac = 0.05 : float # fraction of average upstroke for threshold calculation (optional, default 0.05)
baseline_interval = 0.1 : float # interval length for baseline voltage calculation (before start if start is defined, default 0.1)
baseline_detect_thresh = 0.3 : float # dV/dt threshold for evaluating flatness of baseline region (optional, default 0.3)
subthresh_min_amp = -80 : float # minimum subthreshold current, not related to spike detection.
n_subthres_sweeps = 4 : smallint # number of hyperpolarizing sweeps for calculating Rin and Tau.
sag_target = -100 : float # Use the sweep with peak Vm closest to this number to calculate Sag.
sag_range_right = -89 : float # the range [left, right] of peak Vm to be considered for sag calculation
sag_range_left = -120 : float # the range [left, right] of peak Vm to be considered for sag calculation
adapt_avg_n_sweeps = 3 : smallint # Use the first n sweeps with >=3 isi's to calculate average adaptation ratio.
adapt_first_n_ratios = 2 : smallint # For each sweep, only average the first n adaptation ratios. If None, average all ratios.
spike_detection_delay = 0.001 : float # start detecting spikes at (start + delay) to skip the initial voltage jump.
suprathreshold_target_delta_v = 15 : float # the amount of current injection at rheobase + I to achive Vm increase by delta_v.
suprathreshold_target_delta_i = 15 : float # evaluate some spike train properties at rheobase + I
latency_target_delta_i = 5 : float # evaluate latency at rheobase + I
"""
@schema
class APandIntrinsicProperties(dj.Imported):
    # NOTE: the original definition declared `ap_trough` twice (once as "# mV",
    # once as "# trough within 100 ms from peak, mV"). DataJoint raises an error
    # on duplicate attribute names, so the duplicate has been collapsed into a
    # single declaration carrying the more descriptive comment.
    definition = """
    # Action potential and intrinsic properties from current injections
    -> EphysExperimentsForAnalysis
    -> FeatureExtractionParams
    -> CurrentStepTimeParams
    cell: varchar(128)                        # cell id
    recording: varchar(128)                   # recording file name
    ---
    has_ap : enum('Yes', 'No')                # Yes/No
    v_baseline = null : float                 # mV
    bias_current = null : float               # pA
    tau = null : float                        #
    capacitance = null : float                # pF
    input_resistance = null : float           # MOhm
    f_i_curve_slope = null : float            # no unit
    max_firing_rate = null : float            # Hz
    sag = null : float                        # no unit
    vm_for_sag = null : longblob              # mV
    indices_for_sag = null : longblob         # no unit
    sag_sweep_indices = null : longblob       # no unit
    ap_threshold = null : float               # mV
    ap_width = null : float                   # half height width (peak to trough), ms
    ap_height = null : float                  # peak to trough, mV
    ap_peak = null : float                    # mV
    ap_trough = null : float                  # trough within 100 ms from peak, mV
    ap_trough_to_threshold = null : float     # AHP amplitude, mV, https://neuroelectro.org/ephys_prop/16/
    ap_trough_4w_to_threshold = null : float  # fast AHP amplitude at peak + 4 * width, mV
    ap_trough_5w_to_threshold = null : float  # fast AHP amplitude at peak + 5 * width, mV
    ap_peak_to_threshold = null : float       # spike amplitude, mV, https://neuroelectro.org/ephys_prop/5/
    ap_upstroke = null : float                # mV/ms
    ap_downstroke = null : float              # -mV/ms, positive
    ap_updownstroke_ratio = null : float      # no unit
    ap_fast_trough = null : float             # fast trough defined in allensdk, mV
    ap_slow_trough = null : float             # slow trough defined in allensdk, mV
    ap_adp = null : float                     # mV
    ap_trough_3w = null : float               # fast trough at peak + 3 * width, mV
    ap_trough_4w = null : float               # fast trough at peak + 4 * width, mV
    ap_trough_5w = null : float               # fast trough at peak + 5 * width, mV
    hs_firing_rate = null : float             # Hz
    avg_firing_rate = null : float            # Hz
    hs_adaptation = null : float              # no unit
    hs_median_isi = null : float              # ms
    hs_latency = null : float                 # ms
    avg_hs_latency = null : float             # ms
    avg_rheobase_latency = null : float       # ms
    rheobase_index = null : smallint          # no unit
    rheobase_stim_amp = null : float          # pA
    hero_sweep_index = null : smallint        # no unit
    hero_sweep_stim_amp = null : float        # pA
    all_firing_rate : longblob
    all_stim_amp : longblob
    input_resistance_vm : longblob
    input_resistance_stim_ap : longblob
    all_adaptation : longblob
    all_v_baseline : longblob
    all_median_isi : longblob
    all_first_isi : longblob
    all_latency : longblob
    spikes_sweep_id : longblob
    spikes_threshold_t : longblob
    spikes_peak_t: longblob
    spikes_trough_t: longblob
    spikes_fast_trough_t: longblob
    spikes_slow_trough_t: longblob
    spikes_adp_t: longblob
    spikes_trough_3w_t: longblob
    spikes_trough_4w_t: longblob
    spikes_trough_5w_t: longblob
    spikes_threshold_v: longblob
    spikes_peak_v: longblob
    spikes_trough_v: longblob
    spikes_fast_trough_v: longblob
    spikes_slow_trough_v: longblob
    spikes_adp_v: longblob
    spikes_trough_3w_v: longblob
    spikes_trough_4w_v: longblob
    spikes_trough_5w_v: longblob
    adapt_avg = null : float                  # average adaptation of the 3 sweeps >= 4Hz (1 sec)
    """
    def _make_tuples(self, key):
        """Extract spike and intrinsic features for every istep recording of one experiment.

        For each cell/recording pair belonging to the keyed experiment, loads the
        .abf file, runs `extract_istep_features` on the first second of the
        current step, and inserts one row of summary features per recording.
        """
        ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
        directory = os.path.expanduser(ephys_exp.pop('directory', None))
        # use the first second of current injection for analysis, regardless of the actual duration.
        istep_start, istep_end_1s = \
            (CurrentStepTimeParams() & key).fetch1('istep_start', 'istep_end_1s')
        # Restrict the pool of istep-protocol recordings to this experiment.
        this_sample = (EphysExperimentsForAnalysis() & key)
        all_istep_recordings = (EphysRecordings() & "protocol = 'istep'")
        cells, istep_recordings = (all_istep_recordings * this_sample).fetch('cell','recording')
        params = (FeatureExtractionParams() & key).fetch1()
        params_id = params.pop('params_id', None)
        for cell, rec in zip(cells, istep_recordings):
            print('Populating for: ' + key['experiment'] + ' ' + rec)
            abf_file = os.path.join(directory, key['experiment'], rec + '.abf')
            data = load_current_step(abf_file, min_voltage=-140)
            cell_features, summary_features = \
                extract_istep_features(data, start=istep_start, end=istep_end_1s,
                                       **params)
            newkey = summary_features.copy()
            # Convert the boolean flag to the enum values declared in `definition`.
            newkey['has_ap'] = 'Yes' if summary_features['has_ap'] else 'No'
            newkey['experiment'] = key['experiment']
            newkey['cell'] = cell
            newkey['recording'] = rec
            newkey['params_id'] = params_id
            # _ = newkey.pop('file_id', None)
            self.insert1(row=newkey, ignore_extra_fields=True)
        return
@schema
class CurrentStepPlots(dj.Imported):
    definition = """
    # Plot current clamp raw sweeps + detected spikes. Save figures locally. Store file path.
    -> APandIntrinsicProperties # TODO actually does not need to depend on this.
    ---
    istep_nogray_pdf_path : varchar(256)
    istep_nogray_png_large_path : varchar(256)
    istep_pdf_path : varchar(256)
    istep_png_large_path : varchar(256)
    istep_png_mid_path : varchar(256)
    istep_raw_pdf_path : varchar(256)
    """
    def _make_tuples(self, key):
        """Render and save three variants of the current-step figure for one recording.

        Variants written under FIG_DIR/istep_plots_params-<id>/<experiment>/:
        * istep_nogray -- gray background sweeps suppressed (plot_gray_sweeps=False); PDF + large PNG.
        * istep        -- full figure with feature markers; PDF + large/mid PNGs.
        * istep_raw    -- unrasterized PDF only.
        The table row stores the file paths relative to the experiment directory.
        """
        ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
        directory = os.path.expanduser(ephys_exp.pop('directory', None))
        rec = key['recording']
        print('Populating for: ' + key['experiment'] + ' ' + rec)
        abf_file = os.path.join(directory, key['experiment'], rec + '.abf')
        data = load_current_step(abf_file, min_voltage=-140)
        istep_start, istep_end = \
            (CurrentStepTimeParams() & key).fetch1('istep_start', 'istep_end')
        params = (FeatureExtractionParams() & key).fetch1()
        params_id = params.pop('params_id', None)
        # figures/istep_plots_params-1/2018-03-30_EP2-15/
        parent_directory = os.path.join(FIG_DIR, 'istep_plots_params-' + str(params_id), key['experiment'])
        if not os.path.exists(os.path.join(directory, parent_directory)):
            os.makedirs(os.path.join(directory, parent_directory))
        # The fetched features only contain AP time points for the 1st second
        features_1s = (APandIntrinsicProperties() & key).fetch1()
        # To get all spike times, recalculate APs using the entire current step
        _ , features = \
            extract_istep_features(data, start=istep_start, end=istep_end,
                                   **params)
        # Ensure all three output subfolders exist before any figure is saved.
        for filetype in ['istep_nogray', 'istep', 'istep_raw']:
            target_folder = os.path.join(directory, parent_directory, filetype)
            if not os.path.exists(target_folder):
                os.mkdir(target_folder)
        # Variant 1: no gray sweeps, only the first sag sweep highlighted.
        fig = plot_current_step(data, fig_height=6, startend=[istep_start, istep_end],
                                offset=[0.2, 0.4], skip_sweep=1,
                                blue_sweep=features_1s['hero_sweep_index'],
                                spikes_t = features['spikes_peak_t'],
                                spikes_sweep_id = features['spikes_sweep_id'],
                                bias_current = features['bias_current'],
                                plot_gray_sweeps = False, lw_scale=2, alpha_scale=1, ilim=[-95,60],
                                other_features = None,
                                rheobase_sweep = features_1s['rheobase_index'],
                                sag_sweeps = features_1s['sag_sweep_indices'][:1],
                                save=False, rasterized=True)
        target_folder = os.path.join(parent_directory, 'istep_nogray')
        key['istep_nogray_pdf_path'] = os.path.join(target_folder, 'istep_nogray_' + rec + '.pdf')
        fig.savefig(os.path.join(directory, key['istep_nogray_pdf_path']), dpi=300)
        key['istep_nogray_png_large_path'] = os.path.join(target_folder, 'istep_nogray_large_' + rec + '.png')
        fig.savefig(os.path.join(directory, key['istep_nogray_png_large_path']), dpi=300)
        plt.show()
        plt.close(fig)
        # Variant 2: full figure with feature markers (trough at peak + 5*width).
        fig = plot_current_step(data, fig_height=6, startend=[istep_start, istep_end],
                                offset=[0.2, 0.4], skip_sweep=1,
                                blue_sweep=features_1s['hero_sweep_index'],
                                spikes_t = features['spikes_peak_t'],
                                spikes_sweep_id = features['spikes_sweep_id'],
                                bias_current = features['bias_current'],
                                other_features = features,
                                trough_name = 'spikes_trough_5w',
                                rheobase_sweep = features_1s['rheobase_index'],
                                sag_sweeps = features_1s['sag_sweep_indices'],
                                save=False, rasterized=True)
        target_folder = os.path.join(parent_directory, 'istep')
        key['istep_pdf_path'] = os.path.join(target_folder, 'istep_' + rec + '.pdf')
        fig.savefig(os.path.join(directory, key['istep_pdf_path']), dpi=300)
        key['istep_png_large_path'] = os.path.join(target_folder, 'istep_large_' + rec + '.png')
        fig.savefig(os.path.join(directory, key['istep_png_large_path']), dpi=300)
        key['istep_png_mid_path'] = os.path.join(target_folder, 'istep_mid_' + rec + '.png')
        fig.savefig(os.path.join(directory, key['istep_png_mid_path']), dpi=200)
        plt.show()
        plt.close(fig)
        # Variant 3: same layout as variant 1 but rasterized=False (vector PDF).
        fig = plot_current_step(data, fig_height=6, startend=[istep_start, istep_end],
                                offset=[0.2, 0.4], skip_sweep=1,
                                blue_sweep=features_1s['hero_sweep_index'],
                                spikes_t = features['spikes_peak_t'],
                                spikes_sweep_id = features['spikes_sweep_id'],
                                bias_current = features['bias_current'],
                                other_features = None,
                                rheobase_sweep = features_1s['rheobase_index'],
                                sag_sweeps = features_1s['sag_sweep_indices'][:1],
                                save=False, rasterized=False)
        target_folder = os.path.join(parent_directory, 'istep_raw')
        key['istep_raw_pdf_path'] = os.path.join(target_folder, 'istep_raw_' + rec + '.pdf')
        fig.savefig(os.path.join(directory, key['istep_raw_pdf_path']), dpi=200)
        plt.show()
        plt.close(fig)
        self.insert1(row=key)
        return
@schema
class AnimatedCurrentStepPlots(dj.Imported):
    definition = """
    # Plot current clamp raw sweeps + detected spikes. Save figures locally. Store file path.
    # Saving the animations is slow (~10s per recording). Skip this to finish the pipeline faster.
    -> APandIntrinsicProperties
    ---
    istep_gif_path : varchar(256)
    istep_mp4_path : varchar(256)
    """
    def _make_tuples(self, key):
        """Render an animated current-step figure for one recording and save it as GIF and MP4.

        Requires imagemagick (GIF) and ffmpeg (MP4) writers to be available to
        matplotlib's animation backend. Stores paths relative to the experiment
        directory.
        """
        ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
        directory = os.path.expanduser(ephys_exp.pop('directory', None))
        rec = key['recording']
        print('Populating for: ' + key['experiment'] + ' ' + rec)
        abf_file = os.path.join(directory, key['experiment'], rec + '.abf')
        data = load_current_step(abf_file, min_voltage=-140)
        istep_start, istep_end = \
            (CurrentStepTimeParams() & key).fetch1('istep_start', 'istep_end')
        params = (FeatureExtractionParams() & key).fetch1()
        params_id = params.pop('params_id', None)
        # figures/istep_plots_params-1/2018-03-30_EP2-15/
        parent_directory = os.path.join(FIG_DIR, 'istep_plots_params-' + str(params_id), key['experiment'])
        if not os.path.exists(os.path.join(directory, parent_directory)):
            os.makedirs(os.path.join(directory, parent_directory))
        # The fetched features only contain AP time points for the 1st second
        # NOTE(review): features_1s is fetched but never used below -- candidate for removal.
        features_1s = (APandIntrinsicProperties() & key).fetch1()
        # To get all spike times, recalculate APs using the entire current step
        _ , features = \
            extract_istep_features(data, start=istep_start, end=istep_end,
                                   **params)
        target_folder = os.path.join(directory, parent_directory, 'istep_animation')
        if not os.path.exists(target_folder):
            os.mkdir(target_folder)
        key['istep_gif_path'] = os.path.join(parent_directory, 'istep_animation', 'istep_' + rec + '.gif')
        key['istep_mp4_path'] = os.path.join(parent_directory, 'istep_animation', 'istep_' + rec + '.mp4')
        fig_anim, anim = animate_current_step(data, fig_height=6, startend=[istep_start, istep_end], offset=[0.2, 0.4],
                                 spikes_t = features['spikes_peak_t'],
                                 spikes_sweep_id = features['spikes_sweep_id'],
                                 bias_current = features['bias_current'],
                                 save=False, blit = True)
        anim.save(os.path.join(directory, key['istep_gif_path']), writer='imagemagick', fps=2.5, dpi=100)
        anim.save(os.path.join(directory, key['istep_mp4_path']), writer='ffmpeg', fps=2.5, dpi=100)
        plt.close(fig_anim)
        # Animations hold large frame buffers; reclaim memory before the next key.
        gc.collect()
        self.insert1(row=key)
        return
@schema
class FICurvePlots(dj.Imported):
    definition = """
    # Plot F-I curve from current clamp recordings. Save figures locally. Store file path.
    -> APandIntrinsicProperties
    ---
    fi_svg_path = '' : varchar(256)
    fi_png_path = '' : varchar(256)
    fi_pdf_path = '' : varchar(256)
    """
    def _make_tuples(self, key):
        """Plot and save the F-I (firing rate vs. injected current) curve for one recording.

        Saves PNG, SVG and PDF copies under FIG_DIR and stores their relative
        paths. Recordings without action potentials get a row with empty paths.
        """
        exp_row = (EphysExperimentsForAnalysis() & key).fetch1()
        base_dir = os.path.expanduser(exp_row.pop('directory', None))
        cell_features = (APandIntrinsicProperties() & key).fetch1()
        # No spikes means no F-I curve: record the key with the default empty paths.
        if cell_features['has_ap'] == 'No':
            self.insert1(row=key)
            return
        rec = key['recording']
        print('Populating for: ' + key['experiment'] + ' ' + rec)
        extraction_params = (FeatureExtractionParams() & key).fetch1()
        params_id = extraction_params.pop('params_id', None)
        parent_directory = os.path.join(FIG_DIR, 'istep_plots_params-' + str(params_id), key['experiment'])
        if not os.path.exists(os.path.join(base_dir, parent_directory)):
            os.makedirs(os.path.join(base_dir, parent_directory))
        fig_folder = os.path.join(base_dir, parent_directory, 'fi_curve')
        if not os.path.exists(fig_folder):
            os.mkdir(fig_folder)
        # Features were extracted from the 1st second only; plot those for consistency.
        fi_curve = plot_fi_curve(cell_features['all_stim_amp'], cell_features['all_firing_rate'])
        # Save the same figure in all three formats (png first, then svg, then pdf).
        for ext in ('png', 'svg', 'pdf'):
            key['fi_' + ext + '_path'] = os.path.join(parent_directory, 'fi_curve', 'fi_' + rec + '.' + ext)
            fi_curve.savefig(os.path.join(base_dir, key['fi_' + ext + '_path']), dpi=200)
        plt.show()
        self.insert1(row=key)
        return
@schema
class VICurvePlots(dj.Imported):
    definition = """
    # Plot V-I curve (hyperpolarizing) from current clamp recordings. Save figures locally. Store file path.
    -> APandIntrinsicProperties
    ---
    vi_svg_path = '' : varchar(256)
    vi_png_path = '' : varchar(256)
    vi_pdf_path = '' : varchar(256)
    """
    def _make_tuples(self, key):
        """Plot and save the hyperpolarizing V-I curve for one recording.

        Saves PNG, SVG and PDF copies under FIG_DIR and stores their relative
        paths. Recordings without action potentials get a row with empty paths.
        """
        exp_row = (EphysExperimentsForAnalysis() & key).fetch1()
        base_dir = os.path.expanduser(exp_row.pop('directory', None))
        cell_features = (APandIntrinsicProperties() & key).fetch1()
        # Keep behavior aligned with the other plot tables: skip spikeless cells.
        if cell_features['has_ap'] == 'No':
            self.insert1(row=key)
            return
        rec = key['recording']
        print('Populating for: ' + key['experiment'] + ' ' + rec)
        extraction_params = (FeatureExtractionParams() & key).fetch1()
        params_id = extraction_params.pop('params_id', None)
        parent_directory = os.path.join(FIG_DIR, 'istep_plots_params-' + str(params_id), key['experiment'])
        if not os.path.exists(os.path.join(base_dir, parent_directory)):
            os.makedirs(os.path.join(base_dir, parent_directory))
        fig_folder = os.path.join(base_dir, parent_directory, 'vi_curve')
        if not os.path.exists(fig_folder):
            os.mkdir(fig_folder)
        # Features were extracted from the 1st second only; plot those for consistency.
        vi_curve = plot_vi_curve(cell_features['input_resistance_stim_ap'], cell_features['input_resistance_vm'])
        # Save the same figure in all three formats (png first, then svg, then pdf).
        for ext in ('png', 'svg', 'pdf'):
            key['vi_' + ext + '_path'] = os.path.join(parent_directory, 'vi_curve', 'vi_' + rec + '.' + ext)
            vi_curve.savefig(os.path.join(base_dir, key['vi_' + ext + '_path']), dpi=200)
        plt.show()
        self.insert1(row=key)
        return
@schema
class FirstSpikePlots(dj.Imported):
    definition = """
    # Plot first spikes from current clamp recordings. Save figures locally. Store file path.
    -> APandIntrinsicProperties
    ---
    spike_svg_path = '' : varchar(256)
    spike_png_path = '' : varchar(256)
    spike_pdf_path = '' : varchar(256)
    """
    def _make_tuples(self, key):
        """Plot the first action potential waveform of one recording and save it.

        Saves PNG, SVG and PDF copies under FIG_DIR and stores their relative
        paths. Recordings without action potentials get a row with empty paths.
        """
        exp_row = (EphysExperimentsForAnalysis() & key).fetch1()
        base_dir = os.path.expanduser(exp_row.pop('directory', None))
        cell_features = (APandIntrinsicProperties() & key).fetch1()
        # Nothing to plot without a spike: record the key with empty paths.
        if cell_features['has_ap'] == 'No':
            self.insert1(row=key)
            return
        rec = key['recording']
        print('Populating for: ' + key['experiment'] + ' ' + rec)
        extraction_params = (FeatureExtractionParams() & key).fetch1()
        params_id = extraction_params.pop('params_id', None)
        parent_directory = os.path.join(FIG_DIR, 'istep_plots_params-' + str(params_id), key['experiment'])
        if not os.path.exists(os.path.join(base_dir, parent_directory)):
            os.makedirs(os.path.join(base_dir, parent_directory))
        fig_folder = os.path.join(base_dir, parent_directory, 'first_spike')
        if not os.path.exists(fig_folder):
            os.mkdir(fig_folder)
        # Spike times come from the 1st-second features; re-load the raw trace to plot.
        abf_file = os.path.join(base_dir, key['experiment'], rec + '.abf')
        data = load_current_step(abf_file, min_voltage=-140)
        first_spike = plot_first_spike(data, cell_features, time_zero='threshold', lw_scale=1.5)
        # Save the same figure in all three formats (png first, then svg, then pdf).
        for ext in ('png', 'svg', 'pdf'):
            key['spike_' + ext + '_path'] = os.path.join(parent_directory, 'first_spike', 'spike_' + rec + '.' + ext)
            first_spike.savefig(os.path.join(base_dir, key['spike_' + ext + '_path']), dpi=200)
        plt.show()
        self.insert1(row=key)
        return
@schema
class PhasePlanes(dj.Imported):
    definition = """
    # Plot phase planes of first spikes. Save figures locally. Store file path.
    -> APandIntrinsicProperties
    ---
    phase_svg_path = '' : varchar(256)
    phase_png_path = '' : varchar(256)
    phase_pdf_path = '' : varchar(256)
    """
    def _make_tuples(self, key):
        """Plot the phase plane (dV/dt vs. V) of the first spike and save it.

        Saves PNG, SVG and PDF copies under FIG_DIR and stores their relative
        paths. Recordings without action potentials get a row with empty paths.
        """
        exp_row = (EphysExperimentsForAnalysis() & key).fetch1()
        base_dir = os.path.expanduser(exp_row.pop('directory', None))
        cell_features = (APandIntrinsicProperties() & key).fetch1()
        # Nothing to plot without a spike: record the key with empty paths.
        if cell_features['has_ap'] == 'No':
            self.insert1(row=key)
            return
        rec = key['recording']
        print('Populating for: ' + key['experiment'] + ' ' + rec)
        extraction_params = (FeatureExtractionParams() & key).fetch1()
        params_id = extraction_params.pop('params_id', None)
        parent_directory = os.path.join(FIG_DIR, 'istep_plots_params-' + str(params_id), key['experiment'])
        if not os.path.exists(os.path.join(base_dir, parent_directory)):
            os.makedirs(os.path.join(base_dir, parent_directory))
        fig_folder = os.path.join(base_dir, parent_directory, 'phase_plane')
        if not os.path.exists(fig_folder):
            os.mkdir(fig_folder)
        # Spike times come from the 1st-second features; re-load the raw trace to plot.
        abf_file = os.path.join(base_dir, key['experiment'], rec + '.abf')
        data = load_current_step(abf_file, min_voltage=-140)
        phase_plane = plot_phase_plane(data, cell_features, filter=5.0, lw_scale=1.5) # or use features['filter']
        # Save the same figure in all three formats (png first, then svg, then pdf).
        for ext in ('png', 'svg', 'pdf'):
            key['phase_' + ext + '_path'] = os.path.join(parent_directory, 'phase_plane', 'phase_' + rec + '.' + ext)
            phase_plane.savefig(os.path.join(base_dir, key['phase_' + ext + '_path']), dpi=200)
        plt.show()
        self.insert1(row=key)
        return
@schema
class FirstSpikeFirstDerivativePlots(dj.Imported):
    definition = """
    # Plot first spikes from current clamp recordings. Save figures locally. Store file path.
    -> APandIntrinsicProperties
    ---
    spike_dvdt_svg_path = '' : varchar(256)
    spike_dvdt_png_path = '' : varchar(256)
    spike_dvdt_pdf_path = '' : varchar(256)
    """
    def _make_tuples(self, key):
        """Plot the first derivative (dV/dt) of the first spike and save PNG/SVG/PDF copies.

        Recordings without action potentials get a row with empty paths.
        """
        ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
        directory = os.path.expanduser(ephys_exp.pop('directory', None))
        features = (APandIntrinsicProperties() & key).fetch1()
        # Nothing to plot without a spike: record the key with the default empty paths.
        if features['has_ap'] == 'No':
            self.insert1(row=key)
            return
        rec = key['recording']
        print('Populating for: ' + key['experiment'] + ' ' + rec)
        params = (FeatureExtractionParams() & key).fetch1()
        params_id = params.pop('params_id', None)
        parent_directory = os.path.join(FIG_DIR, 'istep_plots_params-' + str(params_id), key['experiment'])
        if not os.path.exists(os.path.join(directory, parent_directory)):
            os.makedirs(os.path.join(directory, parent_directory))
        target_folder = os.path.join(directory, parent_directory, 'first_spike_dvdt')
        if not os.path.exists(target_folder):
            os.mkdir(target_folder)
        # The fetched features only contain AP time points for the 1st second
        # Only use the 1st second for consistency
        abf_file = os.path.join(directory, key['experiment'], rec + '.abf')
        data = load_current_step(abf_file, min_voltage=-140)
        first_spike = plot_first_spike_dvdt(data, features, time_zero='threshold', filter_dvdt=5.0) # or use features['filter']
        key['spike_dvdt_png_path'] = os.path.join(parent_directory, 'first_spike_dvdt', 'spike_dvdt_' + rec + '.png')
        key['spike_dvdt_svg_path'] = os.path.join(parent_directory, 'first_spike_dvdt', 'spike_dvdt_' + rec + '.svg')
        key['spike_dvdt_pdf_path'] = os.path.join(parent_directory, 'first_spike_dvdt', 'spike_dvdt_' + rec + '.pdf')
        first_spike.savefig(os.path.join(directory, key['spike_dvdt_png_path']), dpi=200)
        first_spike.savefig(os.path.join(directory, key['spike_dvdt_svg_path']), dpi=200)
        first_spike.savefig(os.path.join(directory, key['spike_dvdt_pdf_path']), dpi=200)
        plt.show()
        self.insert1(row=key)
        return
@schema
class FirstSpikeSecondDerivativePlots(dj.Imported):
    definition = """
    # Plot first spikes from current clamp recordings. Save figures locally. Store file path.
    -> APandIntrinsicProperties
    ---
    spike_2nd_derivative_svg_path = '' : varchar(256)
    spike_2nd_derivative_png_path = '' : varchar(256)
    spike_2nd_derivative_pdf_path = '' : varchar(256)
    """
    def _make_tuples(self, key):
        """Plot the second derivative of the first spike and save PNG/SVG/PDF copies.

        Recordings without action potentials get a row with empty paths.
        """
        ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
        directory = os.path.expanduser(ephys_exp.pop('directory', None))
        features = (APandIntrinsicProperties() & key).fetch1()
        # Nothing to plot without a spike: record the key with the default empty paths.
        if features['has_ap'] == 'No':
            self.insert1(row=key)
            return
        rec = key['recording']
        print('Populating for: ' + key['experiment'] + ' ' + rec)
        params = (FeatureExtractionParams() & key).fetch1()
        params_id = params.pop('params_id', None)
        parent_directory = os.path.join(FIG_DIR, 'istep_plots_params-' + str(params_id), key['experiment'])
        if not os.path.exists(os.path.join(directory, parent_directory)):
            os.makedirs(os.path.join(directory, parent_directory))
        target_folder = os.path.join(directory, parent_directory, 'first_spike_2nd_derivative')
        if not os.path.exists(target_folder):
            os.mkdir(target_folder)
        # The fetched features only contain AP time points for the 1st second
        # Only use the 1st second for consistency
        abf_file = os.path.join(directory, key['experiment'], rec + '.abf')
        data = load_current_step(abf_file, min_voltage=-140)
        first_spike = plot_first_spike_2nd_derivative(data, features, time_zero='threshold', filter_dvdt=5.0) # or use features['filter']
        key['spike_2nd_derivative_png_path'] = os.path.join(parent_directory, 'first_spike_2nd_derivative', 'spike_2nd_derivative_' + rec + '.png')
        key['spike_2nd_derivative_svg_path'] = os.path.join(parent_directory, 'first_spike_2nd_derivative', 'spike_2nd_derivative_' + rec + '.svg')
        key['spike_2nd_derivative_pdf_path'] = os.path.join(parent_directory, 'first_spike_2nd_derivative', 'spike_2nd_derivative_' + rec + '.pdf')
        first_spike.savefig(os.path.join(directory, key['spike_2nd_derivative_png_path']), dpi=200)
        first_spike.savefig(os.path.join(directory, key['spike_2nd_derivative_svg_path']), dpi=200)
        first_spike.savefig(os.path.join(directory, key['spike_2nd_derivative_pdf_path']), dpi=200)
        plt.show()
        self.insert1(row=key)
        return
@schema
class FirstSpikePlotsMarkersTrough(dj.Imported):
    definition = """
    # Plot first spikes from current clamp recordings. Save figures locally. Store file path.
    -> APandIntrinsicProperties
    ---
    spike_other_markers_svg_path = '' : varchar(256)
    spike_other_markers_png_path = '' : varchar(256)
    spike_other_markers_pdf_path = '' : varchar(256)
    """
    def _make_tuples(self, key):
        """Plot the first spike with all trough/ADP marker variants overlaid, then save PNG/SVG/PDF.

        Each trough definition (allensdk fast/slow trough, ADP, and the
        peak + n*width variants) gets its own color from a husl palette.
        Recordings without action potentials get a row with empty paths.
        """
        ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
        directory = os.path.expanduser(ephys_exp.pop('directory', None))
        features = (APandIntrinsicProperties() & key).fetch1()
        # Nothing to plot without a spike: record the key with the default empty paths.
        if features['has_ap'] == 'No':
            self.insert1(row=key)
            return
        rec = key['recording']
        print('Populating for: ' + key['experiment'] + ' ' + rec)
        params = (FeatureExtractionParams() & key).fetch1()
        params_id = params.pop('params_id', None)
        parent_directory = os.path.join(FIG_DIR, 'istep_plots_params-' + str(params_id), key['experiment'])
        if not os.path.exists(os.path.join(directory, parent_directory)):
            os.makedirs(os.path.join(directory, parent_directory))
        target_folder = os.path.join(directory, parent_directory, 'first_spike_other_markers')
        if not os.path.exists(target_folder):
            os.mkdir(target_folder)
        # The fetched features only contain AP time points for the 1st second
        # Only use the 1st second for consistency
        abf_file = os.path.join(directory, key['experiment'], rec + '.abf')
        data = load_current_step(abf_file, min_voltage=-140)
        other_features = ['spikes_trough', 'spikes_fast_trough', 'spikes_slow_trough',
                          'spikes_adp', 'spikes_trough_3w', 'spikes_trough_4w', 'spikes_trough_5w']
        # Map each marker name to a distinct husl palette color (hex strings).
        first_spike = plot_first_spike(data, features, time_zero='threshold',
                                       figsize=(7,4), window=[-10,110],
                                       other_markers={k:v for k, v in zip(other_features, sns.color_palette("husl", len(other_features)).as_hex())})
        key['spike_other_markers_png_path'] = os.path.join(parent_directory, 'first_spike_other_markers', 'spike_other_markers_' + rec + '.png')
        key['spike_other_markers_svg_path'] = os.path.join(parent_directory, 'first_spike_other_markers', 'spike_other_markers_' + rec + '.svg')
        key['spike_other_markers_pdf_path'] = os.path.join(parent_directory, 'first_spike_other_markers', 'spike_other_markers_' + rec + '.pdf')
        first_spike.savefig(os.path.join(directory, key['spike_other_markers_png_path']), dpi=200)
        first_spike.savefig(os.path.join(directory, key['spike_other_markers_svg_path']), dpi=200)
        first_spike.savefig(os.path.join(directory, key['spike_other_markers_pdf_path']), dpi=200)
        plt.show()
        self.insert1(row=key)
        return
@schema
class CombinedPlots(dj.Imported):
    definition = """
    # Combine F-I, first spike, phase plane and current step plots together.
    -> CurrentStepPlots
    -> FICurvePlots
    -> FirstSpikePlots
    -> PhasePlanes
    ---
    small_fi_spike_phase = '' : varchar(256)
    small_istep_fi_spike_phase = '' : varchar(256)
    mid_fi_spike_phase = '' : varchar(256)
    mid_istep_fi_spike_phase = '' : varchar(256)
    large_fi_spike_phase = '' : varchar(256)
    large_istep_fi_spike_phase = '' : varchar(256)
    """
    def _make_tuples(self, key):
        """Stitch the per-recording PNGs into composite images at three sizes.

        Builds a vertical stack (F-I, first spike, phase plane) and the same
        stack joined horizontally with the large istep figure, then saves each
        at full, 0.5x and 0.2x scale. If any upstream PNG is missing (empty
        path), inserts the key with empty paths and skips plotting.
        """
        ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
        directory = os.path.expanduser(ephys_exp.pop('directory', None))
        fi = (FICurvePlots() & key).fetch1('fi_png_path')
        spike = (FirstSpikePlots() & key).fetch1('spike_png_path')
        phase = (PhasePlanes() & key).fetch1('phase_png_path')
        istep = (CurrentStepPlots() & key).fetch1('istep_png_large_path')
        # Upstream tables store '' when the cell had no spikes -- skip those.
        if not (fi and spike and phase and istep):
            self.insert1(row=key)
            return
        rec = key['recording']
        print('Populating for: ' + key['experiment'] + ' ' + rec)
        params = (FeatureExtractionParams() & key).fetch1()
        params_id = params.pop('params_id', None)
        parent_directory = os.path.join(FIG_DIR, 'istep_plots_params-' + str(params_id), key['experiment'])
        left_large = combine_vertical([Image.open(os.path.join(directory, x)) for x in [fi, spike, phase]], scale=1)
        left_mid = left_large.resize([int(x * 0.5) for x in left_large.size], resample=Image.BICUBIC)
        left_small = left_large.resize([int(x * 0.2) for x in left_large.size], resample=Image.BICUBIC)
        all_large = combine_horizontal([left_large, Image.open(os.path.join(directory, istep))], scale=1)
        all_mid = all_large.resize([int(x * 0.5) for x in all_large.size], resample=Image.BICUBIC)
        all_small = all_large.resize([int(x * 0.2) for x in all_large.size], resample=Image.BICUBIC)
        # Save each composite under its own subfolder; the zip order pairs each
        # attribute name with its folder and image (first 3 without istep, last 3 with).
        for fpath, folder, img in zip(['large_fi_spike_phase', 'mid_fi_spike_phase', 'small_fi_spike_phase',
                                       'large_istep_fi_spike_phase', 'mid_istep_fi_spike_phase', 'small_istep_fi_spike_phase'],
                                      ['combine_fi_spike_phase'] * 3 + ['combine_istep_fi_spike_phase'] * 3,
                                      [left_large, left_mid, left_small, all_large, all_mid, all_small]):
            target_folder = os.path.join(directory, parent_directory, folder)
            if not os.path.exists(target_folder):
                os.mkdir(target_folder)
            key[fpath] = os.path.join(parent_directory, folder, fpath + '_' + rec + '.png')
            img.save(os.path.join(directory, key[fpath]))
        self.insert1(row=key)
        return
@schema
class CombinedPlotsWithText(dj.Imported):
    definition = """
    # Combine F-I, first spike, phase plane and current step plots together.
    -> CurrentStepPlots
    -> FICurvePlots
    -> VICurvePlots
    -> FirstSpikePlots
    -> PhasePlanes
    -> Animals
    -> PatchCells
    -> APandIntrinsicProperties
    ---
    small_fi_vi_spike_phase = '' : varchar(256)
    mid_fi_vi_spike_phase = '' : varchar(256)
    large_fi_vi_spike_phase = '' : varchar(256)
    small_istep_fi_vi_spike_phase = '' : varchar(256)
    mid_istep_fi_vi_spike_phase = '' : varchar(256)
    large_istep_fi_vi_spike_phase = '' : varchar(256)
    """
    def _make_tuples(self, key):
        """Build composite summary images (V-I, F-I, phase plane, first spike, istep)
        annotated with cell metadata and extracted features, at three scales.

        If any upstream PNG is missing (empty path), inserts the key with empty
        paths and skips plotting.
        """
        ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
        directory = os.path.expanduser(ephys_exp.pop('directory', None))
        fi = (FICurvePlots() & key).fetch1('fi_png_path')
        vi = (VICurvePlots() & key).fetch1('vi_png_path')
        spike = (FirstSpikePlots() & key).fetch1('spike_png_path')
        phase = (PhasePlanes() & key).fetch1('phase_png_path')
        istep = (CurrentStepPlots() & key).fetch1('istep_png_large_path')
        animal = (Animals() & key).fetch1()
        cell = (PatchCells() & key).fetch1()
        features_1s = (APandIntrinsicProperties() & key).fetch1()
        # Merge metadata and features; later updates overwrite earlier keys on collision.
        features_and_meta = OrderedDict()
        features_and_meta.update(animal)
        features_and_meta.update(cell)
        features_and_meta.update(features_1s)
        # NOTE(review): vi is fetched but not part of this guard -- confirm whether
        # a missing vi_png_path should also skip.
        if not (fi and spike and phase and istep):
            self.insert1(row=key)
            return
        rec = key['recording']
        print('Populating for: ' + key['experiment'] + ' ' + rec)
        params = (FeatureExtractionParams() & key).fetch1()
        params_id = params.pop('params_id', None)
        parent_directory = os.path.join(FIG_DIR, 'istep_plots_params-' + str(params_id), key['experiment'])
        # 2x2 layout: top row = V-I | F-I, bottom row = phase plane | first spike.
        top_large = combine_horizontal([Image.open(os.path.join(directory, x)) for x in [vi, fi]], scale=1)
        bot_large = combine_horizontal([Image.open(os.path.join(directory, x)) for x in [phase, spike]], scale=1)
        left_large = combine_vertical([top_large, bot_large], scale=1)
        left_mid = left_large.resize([int(x * 0.5) for x in left_large.size], resample=Image.BICUBIC)
        left_small = left_large.resize([int(x * 0.2) for x in left_large.size], resample=Image.BICUBIC)
        # Append a white strip below as a canvas for the text block.
        left_with_text = combine_vertical([top_large, bot_large, Image.new('RGB', bot_large.size, (255,255,255))], scale=1)
        # print metadata and features on the plot
        # NOTE(review): 'v_rest' is not a column of APandIntrinsicProperties --
        # presumably supplied by Animals/PatchCells; confirm against those tables.
        features_keys = ['input_resistance', 'sag', 'capacitance', 'v_rest',
                         'f_i_curve_slope', 'ap_threshold', 'ap_width', 'ap_peak_to_threshold',
                         'ap_trough_to_threshold', 'ap_trough_5w_to_threshold', 'ap_upstroke',
                         'ap_updownstroke_ratio', 'adapt_avg', 'avg_rheobase_latency']
        metadata_keys = ['date', 'strain', 'cell', 'recording', 'dob', 'age', 'fill']
        features_to_print = [(feature_name_dict[feature], features_and_meta[feature]) for feature in features_keys]
        #print(features_to_print)
        # Floats are shown with 3 significant digits; everything else verbatim.
        features_to_print = '\n'.join(["{}: {:.3g}".format(x, y) if isinstance(y, float) else "{}: {}".format(x, y) for x, y in features_to_print])
        metadata_to_print = [(metadata, features_and_meta[metadata]) for metadata in metadata_keys]
        metadata_to_print = '\n'.join(["{}: {}".format(x, y) for x, y in metadata_to_print])
        # Pixel coordinates are tuned to the composite's fixed layout.
        left_with_text = draw_text_on_image(left_with_text, [metadata_to_print, features_to_print],
                                            [(100,1650), (900,1650)], font_size=38)
        all_large = combine_horizontal([left_with_text, Image.open(os.path.join(directory, istep))], scale=1)
        all_mid = all_large.resize([int(x * 0.5) for x in all_large.size], resample=Image.BICUBIC)
        all_small = all_large.resize([int(x * 0.2) for x in all_large.size], resample=Image.BICUBIC)
        # NOTE(review): the first three outputs use left_large (no text strip),
        # while the istep variants use left_with_text -- confirm this is intended.
        for fpath, folder, img in zip(['large_fi_vi_spike_phase', 'mid_fi_vi_spike_phase', 'small_fi_vi_spike_phase',
                                       'large_istep_fi_vi_spike_phase', 'mid_istep_fi_vi_spike_phase', 'small_istep_fi_vi_spike_phase'],
                                      ['combine_fi_vi_spike_phase'] * 3 + ['combine_istep_fi_vi_spike_phase'] * 3,
                                      [left_large, left_mid, left_small, all_large, all_mid, all_small]):
            target_folder = os.path.join(directory, parent_directory, folder)
            if not os.path.exists(target_folder):
                os.mkdir(target_folder)
            key[fpath] = os.path.join(parent_directory, folder, fpath + '_' + rec + '.png')
            img.save(os.path.join(directory, key[fpath]))
        self.insert1(row=key)
        return
|
[
"os.mkdir",
"current_clamp_features.extract_istep_features",
"PIL.Image.new",
"matplotlib.pyplot.show",
"matplotlib.pyplot.close",
"os.path.exists",
"file_io.load_current_step",
"pandas.isnull",
"pandas.read_excel",
"gc.collect",
"collections.OrderedDict",
"os.path.join"
] |
[((19670, 19726), 'os.path.join', 'os.path.join', (['directory', "key['experiment']", "(rec + '.abf')"], {}), "(directory, key['experiment'], rec + '.abf')\n", (19682, 19726), False, 'import os\n'), ((19742, 19787), 'file_io.load_current_step', 'load_current_step', (['abf_file'], {'min_voltage': '(-140)'}), '(abf_file, min_voltage=-140)\n', (19759, 19787), False, 'from file_io import load_current_step\n'), ((20596, 20668), 'current_clamp_features.extract_istep_features', 'extract_istep_features', (['data'], {'start': 'istep_start', 'end': 'istep_end'}), '(data, start=istep_start, end=istep_end, **params)\n', (20618, 20668), False, 'from current_clamp_features import extract_istep_features\n'), ((21780, 21826), 'os.path.join', 'os.path.join', (['parent_directory', '"""istep_nogray"""'], {}), "(parent_directory, 'istep_nogray')\n", (21792, 21826), False, 'import os\n'), ((21866, 21925), 'os.path.join', 'os.path.join', (['target_folder', "('istep_nogray_' + rec + '.pdf')"], {}), "(target_folder, 'istep_nogray_' + rec + '.pdf')\n", (21878, 21925), False, 'import os\n'), ((22055, 22120), 'os.path.join', 'os.path.join', (['target_folder', "('istep_nogray_large_' + rec + '.png')"], {}), "(target_folder, 'istep_nogray_large_' + rec + '.png')\n", (22067, 22120), False, 'import os\n'), ((22219, 22229), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22227, 22229), True, 'import matplotlib.pyplot as plt\n'), ((22238, 22252), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (22247, 22252), True, 'import matplotlib.pyplot as plt\n'), ((23075, 23114), 'os.path.join', 'os.path.join', (['parent_directory', '"""istep"""'], {}), "(parent_directory, 'istep')\n", (23087, 23114), False, 'import os\n'), ((23147, 23199), 'os.path.join', 'os.path.join', (['target_folder', "('istep_' + rec + '.pdf')"], {}), "(target_folder, 'istep_' + rec + '.pdf')\n", (23159, 23199), False, 'import os\n'), ((23315, 23373), 'os.path.join', 'os.path.join', (['target_folder', 
"('istep_large_' + rec + '.png')"], {}), "(target_folder, 'istep_large_' + rec + '.png')\n", (23327, 23373), False, 'import os\n'), ((23493, 23549), 'os.path.join', 'os.path.join', (['target_folder', "('istep_mid_' + rec + '.png')"], {}), "(target_folder, 'istep_mid_' + rec + '.png')\n", (23505, 23549), False, 'import os\n'), ((23639, 23649), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (23647, 23649), True, 'import matplotlib.pyplot as plt\n'), ((23658, 23672), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (23667, 23672), True, 'import matplotlib.pyplot as plt\n'), ((24429, 24472), 'os.path.join', 'os.path.join', (['parent_directory', '"""istep_raw"""'], {}), "(parent_directory, 'istep_raw')\n", (24441, 24472), False, 'import os\n'), ((24509, 24565), 'os.path.join', 'os.path.join', (['target_folder', "('istep_raw_' + rec + '.pdf')"], {}), "(target_folder, 'istep_raw_' + rec + '.pdf')\n", (24521, 24565), False, 'import os\n'), ((24655, 24665), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24663, 24665), True, 'import matplotlib.pyplot as plt\n'), ((24674, 24688), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (24683, 24688), True, 'import matplotlib.pyplot as plt\n'), ((25411, 25467), 'os.path.join', 'os.path.join', (['directory', "key['experiment']", "(rec + '.abf')"], {}), "(directory, key['experiment'], rec + '.abf')\n", (25423, 25467), False, 'import os\n'), ((25483, 25528), 'file_io.load_current_step', 'load_current_step', (['abf_file'], {'min_voltage': '(-140)'}), '(abf_file, min_voltage=-140)\n', (25500, 25528), False, 'from file_io import load_current_step\n'), ((26337, 26409), 'current_clamp_features.extract_istep_features', 'extract_istep_features', (['data'], {'start': 'istep_start', 'end': 'istep_end'}), '(data, start=istep_start, end=istep_end, **params)\n', (26359, 26409), False, 'from current_clamp_features import extract_istep_features\n'), ((26455, 26515), 'os.path.join', 'os.path.join', 
(['directory', 'parent_directory', '"""istep_animation"""'], {}), "(directory, parent_directory, 'istep_animation')\n", (26467, 26515), False, 'import os\n'), ((26631, 26705), 'os.path.join', 'os.path.join', (['parent_directory', '"""istep_animation"""', "('istep_' + rec + '.gif')"], {}), "(parent_directory, 'istep_animation', 'istep_' + rec + '.gif')\n", (26643, 26705), False, 'import os\n'), ((26738, 26812), 'os.path.join', 'os.path.join', (['parent_directory', '"""istep_animation"""', "('istep_' + rec + '.mp4')"], {}), "(parent_directory, 'istep_animation', 'istep_' + rec + '.mp4')\n", (26750, 26812), False, 'import os\n'), ((27411, 27430), 'matplotlib.pyplot.close', 'plt.close', (['fig_anim'], {}), '(fig_anim)\n', (27420, 27430), True, 'import matplotlib.pyplot as plt\n'), ((27439, 27451), 'gc.collect', 'gc.collect', ([], {}), '()\n', (27449, 27451), False, 'import gc\n'), ((28619, 28672), 'os.path.join', 'os.path.join', (['directory', 'parent_directory', '"""fi_curve"""'], {}), "(directory, parent_directory, 'fi_curve')\n", (28631, 28672), False, 'import os\n'), ((29001, 29065), 'os.path.join', 'os.path.join', (['parent_directory', '"""fi_curve"""', "('fi_' + rec + '.png')"], {}), "(parent_directory, 'fi_curve', 'fi_' + rec + '.png')\n", (29013, 29065), False, 'import os\n'), ((29095, 29159), 'os.path.join', 'os.path.join', (['parent_directory', '"""fi_curve"""', "('fi_' + rec + '.svg')"], {}), "(parent_directory, 'fi_curve', 'fi_' + rec + '.svg')\n", (29107, 29159), False, 'import os\n'), ((29189, 29253), 'os.path.join', 'os.path.join', (['parent_directory', '"""fi_curve"""', "('fi_' + rec + '.pdf')"], {}), "(parent_directory, 'fi_curve', 'fi_' + rec + '.pdf')\n", (29201, 29253), False, 'import os\n'), ((29499, 29509), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (29507, 29509), True, 'import matplotlib.pyplot as plt\n'), ((30694, 30747), 'os.path.join', 'os.path.join', (['directory', 'parent_directory', '"""vi_curve"""'], {}), "(directory, 
parent_directory, 'vi_curve')\n", (30706, 30747), False, 'import os\n'), ((31092, 31156), 'os.path.join', 'os.path.join', (['parent_directory', '"""vi_curve"""', "('vi_' + rec + '.png')"], {}), "(parent_directory, 'vi_curve', 'vi_' + rec + '.png')\n", (31104, 31156), False, 'import os\n'), ((31186, 31250), 'os.path.join', 'os.path.join', (['parent_directory', '"""vi_curve"""', "('vi_' + rec + '.svg')"], {}), "(parent_directory, 'vi_curve', 'vi_' + rec + '.svg')\n", (31198, 31250), False, 'import os\n'), ((31280, 31344), 'os.path.join', 'os.path.join', (['parent_directory', '"""vi_curve"""', "('vi_' + rec + '.pdf')"], {}), "(parent_directory, 'vi_curve', 'vi_' + rec + '.pdf')\n", (31292, 31344), False, 'import os\n'), ((31590, 31600), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (31598, 31600), True, 'import matplotlib.pyplot as plt\n'), ((32781, 32837), 'os.path.join', 'os.path.join', (['directory', 'parent_directory', '"""first_spike"""'], {}), "(directory, parent_directory, 'first_spike')\n", (32793, 32837), False, 'import os\n'), ((33067, 33123), 'os.path.join', 'os.path.join', (['directory', "key['experiment']", "(rec + '.abf')"], {}), "(directory, key['experiment'], rec + '.abf')\n", (33079, 33123), False, 'import os\n'), ((33139, 33184), 'file_io.load_current_step', 'load_current_step', (['abf_file'], {'min_voltage': '(-140)'}), '(abf_file, min_voltage=-140)\n', (33156, 33184), False, 'from file_io import load_current_step\n'), ((33310, 33380), 'os.path.join', 'os.path.join', (['parent_directory', '"""first_spike"""', "('spike_' + rec + '.png')"], {}), "(parent_directory, 'first_spike', 'spike_' + rec + '.png')\n", (33322, 33380), False, 'import os\n'), ((33413, 33483), 'os.path.join', 'os.path.join', (['parent_directory', '"""first_spike"""', "('spike_' + rec + '.svg')"], {}), "(parent_directory, 'first_spike', 'spike_' + rec + '.svg')\n", (33425, 33483), False, 'import os\n'), ((33516, 33586), 'os.path.join', 'os.path.join', (['parent_directory', 
'"""first_spike"""', "('spike_' + rec + '.pdf')"], {}), "(parent_directory, 'first_spike', 'spike_' + rec + '.pdf')\n", (33528, 33586), False, 'import os\n'), ((33850, 33860), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (33858, 33860), True, 'import matplotlib.pyplot as plt\n'), ((35023, 35079), 'os.path.join', 'os.path.join', (['directory', 'parent_directory', '"""phase_plane"""'], {}), "(directory, parent_directory, 'phase_plane')\n", (35035, 35079), False, 'import os\n'), ((35309, 35365), 'os.path.join', 'os.path.join', (['directory', "key['experiment']", "(rec + '.abf')"], {}), "(directory, key['experiment'], rec + '.abf')\n", (35321, 35365), False, 'import os\n'), ((35381, 35426), 'file_io.load_current_step', 'load_current_step', (['abf_file'], {'min_voltage': '(-140)'}), '(abf_file, min_voltage=-140)\n', (35398, 35426), False, 'from file_io import load_current_step\n'), ((35570, 35640), 'os.path.join', 'os.path.join', (['parent_directory', '"""phase_plane"""', "('phase_' + rec + '.png')"], {}), "(parent_directory, 'phase_plane', 'phase_' + rec + '.png')\n", (35582, 35640), False, 'import os\n'), ((35673, 35743), 'os.path.join', 'os.path.join', (['parent_directory', '"""phase_plane"""', "('phase_' + rec + '.svg')"], {}), "(parent_directory, 'phase_plane', 'phase_' + rec + '.svg')\n", (35685, 35743), False, 'import os\n'), ((35776, 35846), 'os.path.join', 'os.path.join', (['parent_directory', '"""phase_plane"""', "('phase_' + rec + '.pdf')"], {}), "(parent_directory, 'phase_plane', 'phase_' + rec + '.pdf')\n", (35788, 35846), False, 'import os\n'), ((36110, 36120), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (36118, 36120), True, 'import matplotlib.pyplot as plt\n'), ((37331, 37392), 'os.path.join', 'os.path.join', (['directory', 'parent_directory', '"""first_spike_dvdt"""'], {}), "(directory, parent_directory, 'first_spike_dvdt')\n", (37343, 37392), False, 'import os\n'), ((37622, 37678), 'os.path.join', 'os.path.join', (['directory', 
"key['experiment']", "(rec + '.abf')"], {}), "(directory, key['experiment'], rec + '.abf')\n", (37634, 37678), False, 'import os\n'), ((37694, 37739), 'file_io.load_current_step', 'load_current_step', (['abf_file'], {'min_voltage': '(-140)'}), '(abf_file, min_voltage=-140)\n', (37711, 37739), False, 'from file_io import load_current_step\n'), ((37907, 37992), 'os.path.join', 'os.path.join', (['parent_directory', '"""first_spike_dvdt"""', "('spike_dvdt_' + rec + '.png')"], {}), "(parent_directory, 'first_spike_dvdt', 'spike_dvdt_' + rec + '.png'\n )\n", (37919, 37992), False, 'import os\n'), ((38025, 38110), 'os.path.join', 'os.path.join', (['parent_directory', '"""first_spike_dvdt"""', "('spike_dvdt_' + rec + '.svg')"], {}), "(parent_directory, 'first_spike_dvdt', 'spike_dvdt_' + rec + '.svg'\n )\n", (38037, 38110), False, 'import os\n'), ((38143, 38228), 'os.path.join', 'os.path.join', (['parent_directory', '"""first_spike_dvdt"""', "('spike_dvdt_' + rec + '.pdf')"], {}), "(parent_directory, 'first_spike_dvdt', 'spike_dvdt_' + rec + '.pdf'\n )\n", (38155, 38228), False, 'import os\n'), ((38502, 38512), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (38510, 38512), True, 'import matplotlib.pyplot as plt\n'), ((39755, 39826), 'os.path.join', 'os.path.join', (['directory', 'parent_directory', '"""first_spike_2nd_derivative"""'], {}), "(directory, parent_directory, 'first_spike_2nd_derivative')\n", (39767, 39826), False, 'import os\n'), ((40056, 40112), 'os.path.join', 'os.path.join', (['directory', "key['experiment']", "(rec + '.abf')"], {}), "(directory, key['experiment'], rec + '.abf')\n", (40068, 40112), False, 'import os\n'), ((40128, 40173), 'file_io.load_current_step', 'load_current_step', (['abf_file'], {'min_voltage': '(-140)'}), '(abf_file, min_voltage=-140)\n', (40145, 40173), False, 'from file_io import load_current_step\n'), ((40361, 40466), 'os.path.join', 'os.path.join', (['parent_directory', '"""first_spike_2nd_derivative"""', 
"('spike_2nd_derivative_' + rec + '.png')"], {}), "(parent_directory, 'first_spike_2nd_derivative', \n 'spike_2nd_derivative_' + rec + '.png')\n", (40373, 40466), False, 'import os\n'), ((40509, 40614), 'os.path.join', 'os.path.join', (['parent_directory', '"""first_spike_2nd_derivative"""', "('spike_2nd_derivative_' + rec + '.svg')"], {}), "(parent_directory, 'first_spike_2nd_derivative', \n 'spike_2nd_derivative_' + rec + '.svg')\n", (40521, 40614), False, 'import os\n'), ((40657, 40762), 'os.path.join', 'os.path.join', (['parent_directory', '"""first_spike_2nd_derivative"""', "('spike_2nd_derivative_' + rec + '.pdf')"], {}), "(parent_directory, 'first_spike_2nd_derivative', \n 'spike_2nd_derivative_' + rec + '.pdf')\n", (40669, 40762), False, 'import os\n'), ((41066, 41076), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (41074, 41076), True, 'import matplotlib.pyplot as plt\n'), ((42312, 42382), 'os.path.join', 'os.path.join', (['directory', 'parent_directory', '"""first_spike_other_markers"""'], {}), "(directory, parent_directory, 'first_spike_other_markers')\n", (42324, 42382), False, 'import os\n'), ((42612, 42668), 'os.path.join', 'os.path.join', (['directory', "key['experiment']", "(rec + '.abf')"], {}), "(directory, key['experiment'], rec + '.abf')\n", (42624, 42668), False, 'import os\n'), ((42684, 42729), 'file_io.load_current_step', 'load_current_step', (['abf_file'], {'min_voltage': '(-140)'}), '(abf_file, min_voltage=-140)\n', (42701, 42729), False, 'from file_io import load_current_step\n'), ((43241, 43344), 'os.path.join', 'os.path.join', (['parent_directory', '"""first_spike_other_markers"""', "('spike_other_markers_' + rec + '.png')"], {}), "(parent_directory, 'first_spike_other_markers', \n 'spike_other_markers_' + rec + '.png')\n", (43253, 43344), False, 'import os\n'), ((43386, 43489), 'os.path.join', 'os.path.join', (['parent_directory', '"""first_spike_other_markers"""', "('spike_other_markers_' + rec + '.svg')"], {}), 
"(parent_directory, 'first_spike_other_markers', \n 'spike_other_markers_' + rec + '.svg')\n", (43398, 43489), False, 'import os\n'), ((43531, 43634), 'os.path.join', 'os.path.join', (['parent_directory', '"""first_spike_other_markers"""', "('spike_other_markers_' + rec + '.pdf')"], {}), "(parent_directory, 'first_spike_other_markers', \n 'spike_other_markers_' + rec + '.pdf')\n", (43543, 43634), False, 'import os\n'), ((43935, 43945), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (43943, 43945), True, 'import matplotlib.pyplot as plt\n'), ((48108, 48121), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (48119, 48121), False, 'from collections import OrderedDict\n'), ((2698, 2750), 'os.path.join', 'os.path.join', (['directory', "(key['experiment'] + '.xlsx')"], {}), "(directory, key['experiment'] + '.xlsx')\n", (2710, 2750), False, 'import os\n'), ((2851, 2880), 'pandas.isnull', 'pd.isnull', (["animal_info['DOB']"], {}), "(animal_info['DOB'])\n", (2860, 2880), True, 'import pandas as pd\n'), ((2929, 2958), 'pandas.isnull', 'pd.isnull', (["animal_info['age']"], {}), "(animal_info['age'])\n", (2938, 2958), True, 'import pandas as pd\n'), ((3196, 3229), 'pandas.isnull', 'pd.isnull', (["animal_info['comment']"], {}), "(animal_info['comment'])\n", (3205, 3229), True, 'import pandas as pd\n'), ((4543, 4595), 'os.path.join', 'os.path.join', (['directory', "(key['experiment'] + '.xlsx')"], {}), "(directory, key['experiment'] + '.xlsx')\n", (4555, 4595), False, 'import os\n'), ((7824, 7876), 'os.path.join', 'os.path.join', (['directory', "(key['experiment'] + '.xlsx')"], {}), "(directory, key['experiment'] + '.xlsx')\n", (7836, 7876), False, 'import os\n'), ((10626, 10651), 'pandas.read_excel', 'pd.read_excel', (['excel_file'], {}), '(excel_file)\n', (10639, 10651), True, 'import pandas as pd\n'), ((18169, 18225), 'os.path.join', 'os.path.join', (['directory', "key['experiment']", "(rec + '.abf')"], {}), "(directory, key['experiment'], rec + 
'.abf')\n", (18181, 18225), False, 'import os\n'), ((18245, 18290), 'file_io.load_current_step', 'load_current_step', (['abf_file'], {'min_voltage': '(-140)'}), '(abf_file, min_voltage=-140)\n', (18262, 18290), False, 'from file_io import load_current_step\n'), ((18363, 18438), 'current_clamp_features.extract_istep_features', 'extract_istep_features', (['data'], {'start': 'istep_start', 'end': 'istep_end_1s'}), '(data, start=istep_start, end=istep_end_1s, **params)\n', (18385, 18438), False, 'from current_clamp_features import extract_istep_features\n'), ((20782, 20833), 'os.path.join', 'os.path.join', (['directory', 'parent_directory', 'filetype'], {}), '(directory, parent_directory, filetype)\n', (20794, 20833), False, 'import os\n'), ((21946, 21999), 'os.path.join', 'os.path.join', (['directory', "key['istep_nogray_pdf_path']"], {}), "(directory, key['istep_nogray_pdf_path'])\n", (21958, 21999), False, 'import os\n'), ((22141, 22200), 'os.path.join', 'os.path.join', (['directory', "key['istep_nogray_png_large_path']"], {}), "(directory, key['istep_nogray_png_large_path'])\n", (22153, 22200), False, 'import os\n'), ((23220, 23266), 'os.path.join', 'os.path.join', (['directory', "key['istep_pdf_path']"], {}), "(directory, key['istep_pdf_path'])\n", (23232, 23266), False, 'import os\n'), ((23394, 23446), 'os.path.join', 'os.path.join', (['directory', "key['istep_png_large_path']"], {}), "(directory, key['istep_png_large_path'])\n", (23406, 23446), False, 'import os\n'), ((23570, 23620), 'os.path.join', 'os.path.join', (['directory', "key['istep_png_mid_path']"], {}), "(directory, key['istep_png_mid_path'])\n", (23582, 23620), False, 'import os\n'), ((24586, 24636), 'os.path.join', 'os.path.join', (['directory', "key['istep_raw_pdf_path']"], {}), "(directory, key['istep_raw_pdf_path'])\n", (24598, 24636), False, 'import os\n'), ((26531, 26560), 'os.path.exists', 'os.path.exists', (['target_folder'], {}), '(target_folder)\n', (26545, 26560), False, 'import os\n'), 
((26574, 26597), 'os.mkdir', 'os.mkdir', (['target_folder'], {}), '(target_folder)\n', (26582, 26597), False, 'import os\n'), ((27214, 27260), 'os.path.join', 'os.path.join', (['directory', "key['istep_gif_path']"], {}), "(directory, key['istep_gif_path'])\n", (27226, 27260), False, 'import os\n'), ((27320, 27366), 'os.path.join', 'os.path.join', (['directory', "key['istep_mp4_path']"], {}), "(directory, key['istep_mp4_path'])\n", (27332, 27366), False, 'import os\n'), ((28688, 28717), 'os.path.exists', 'os.path.exists', (['target_folder'], {}), '(target_folder)\n', (28702, 28717), False, 'import os\n'), ((28731, 28754), 'os.mkdir', 'os.mkdir', (['target_folder'], {}), '(target_folder)\n', (28739, 28754), False, 'import os\n'), ((29279, 29322), 'os.path.join', 'os.path.join', (['directory', "key['fi_png_path']"], {}), "(directory, key['fi_png_path'])\n", (29291, 29322), False, 'import os\n'), ((29358, 29401), 'os.path.join', 'os.path.join', (['directory', "key['fi_svg_path']"], {}), "(directory, key['fi_svg_path'])\n", (29370, 29401), False, 'import os\n'), ((29437, 29480), 'os.path.join', 'os.path.join', (['directory', "key['fi_pdf_path']"], {}), "(directory, key['fi_pdf_path'])\n", (29449, 29480), False, 'import os\n'), ((30763, 30792), 'os.path.exists', 'os.path.exists', (['target_folder'], {}), '(target_folder)\n', (30777, 30792), False, 'import os\n'), ((30806, 30829), 'os.mkdir', 'os.mkdir', (['target_folder'], {}), '(target_folder)\n', (30814, 30829), False, 'import os\n'), ((31370, 31413), 'os.path.join', 'os.path.join', (['directory', "key['vi_png_path']"], {}), "(directory, key['vi_png_path'])\n", (31382, 31413), False, 'import os\n'), ((31449, 31492), 'os.path.join', 'os.path.join', (['directory', "key['vi_svg_path']"], {}), "(directory, key['vi_svg_path'])\n", (31461, 31492), False, 'import os\n'), ((31528, 31571), 'os.path.join', 'os.path.join', (['directory', "key['vi_pdf_path']"], {}), "(directory, key['vi_pdf_path'])\n", (31540, 31571), False, 
'import os\n'), ((32853, 32882), 'os.path.exists', 'os.path.exists', (['target_folder'], {}), '(target_folder)\n', (32867, 32882), False, 'import os\n'), ((32896, 32919), 'os.mkdir', 'os.mkdir', (['target_folder'], {}), '(target_folder)\n', (32904, 32919), False, 'import os\n'), ((33615, 33661), 'os.path.join', 'os.path.join', (['directory', "key['spike_png_path']"], {}), "(directory, key['spike_png_path'])\n", (33627, 33661), False, 'import os\n'), ((33700, 33746), 'os.path.join', 'os.path.join', (['directory', "key['spike_svg_path']"], {}), "(directory, key['spike_svg_path'])\n", (33712, 33746), False, 'import os\n'), ((33785, 33831), 'os.path.join', 'os.path.join', (['directory', "key['spike_pdf_path']"], {}), "(directory, key['spike_pdf_path'])\n", (33797, 33831), False, 'import os\n'), ((35095, 35124), 'os.path.exists', 'os.path.exists', (['target_folder'], {}), '(target_folder)\n', (35109, 35124), False, 'import os\n'), ((35138, 35161), 'os.mkdir', 'os.mkdir', (['target_folder'], {}), '(target_folder)\n', (35146, 35161), False, 'import os\n'), ((35875, 35921), 'os.path.join', 'os.path.join', (['directory', "key['phase_png_path']"], {}), "(directory, key['phase_png_path'])\n", (35887, 35921), False, 'import os\n'), ((35960, 36006), 'os.path.join', 'os.path.join', (['directory', "key['phase_svg_path']"], {}), "(directory, key['phase_svg_path'])\n", (35972, 36006), False, 'import os\n'), ((36045, 36091), 'os.path.join', 'os.path.join', (['directory', "key['phase_pdf_path']"], {}), "(directory, key['phase_pdf_path'])\n", (36057, 36091), False, 'import os\n'), ((37408, 37437), 'os.path.exists', 'os.path.exists', (['target_folder'], {}), '(target_folder)\n', (37422, 37437), False, 'import os\n'), ((37451, 37474), 'os.mkdir', 'os.mkdir', (['target_folder'], {}), '(target_folder)\n', (37459, 37474), False, 'import os\n'), ((38252, 38303), 'os.path.join', 'os.path.join', (['directory', "key['spike_dvdt_png_path']"], {}), "(directory, key['spike_dvdt_png_path'])\n", 
(38264, 38303), False, 'import os\n'), ((38342, 38393), 'os.path.join', 'os.path.join', (['directory', "key['spike_dvdt_svg_path']"], {}), "(directory, key['spike_dvdt_svg_path'])\n", (38354, 38393), False, 'import os\n'), ((38432, 38483), 'os.path.join', 'os.path.join', (['directory', "key['spike_dvdt_pdf_path']"], {}), "(directory, key['spike_dvdt_pdf_path'])\n", (38444, 38483), False, 'import os\n'), ((39842, 39871), 'os.path.exists', 'os.path.exists', (['target_folder'], {}), '(target_folder)\n', (39856, 39871), False, 'import os\n'), ((39885, 39908), 'os.mkdir', 'os.mkdir', (['target_folder'], {}), '(target_folder)\n', (39893, 39908), False, 'import os\n'), ((40786, 40847), 'os.path.join', 'os.path.join', (['directory', "key['spike_2nd_derivative_png_path']"], {}), "(directory, key['spike_2nd_derivative_png_path'])\n", (40798, 40847), False, 'import os\n'), ((40886, 40947), 'os.path.join', 'os.path.join', (['directory', "key['spike_2nd_derivative_svg_path']"], {}), "(directory, key['spike_2nd_derivative_svg_path'])\n", (40898, 40947), False, 'import os\n'), ((40986, 41047), 'os.path.join', 'os.path.join', (['directory', "key['spike_2nd_derivative_pdf_path']"], {}), "(directory, key['spike_2nd_derivative_pdf_path'])\n", (40998, 41047), False, 'import os\n'), ((42398, 42427), 'os.path.exists', 'os.path.exists', (['target_folder'], {}), '(target_folder)\n', (42412, 42427), False, 'import os\n'), ((42441, 42464), 'os.mkdir', 'os.mkdir', (['target_folder'], {}), '(target_folder)\n', (42449, 42464), False, 'import os\n'), ((43658, 43718), 'os.path.join', 'os.path.join', (['directory', "key['spike_other_markers_png_path']"], {}), "(directory, key['spike_other_markers_png_path'])\n", (43670, 43718), False, 'import os\n'), ((43757, 43817), 'os.path.join', 'os.path.join', (['directory', "key['spike_other_markers_svg_path']"], {}), "(directory, key['spike_other_markers_svg_path'])\n", (43769, 43817), False, 'import os\n'), ((43856, 43916), 'os.path.join', 'os.path.join', 
(['directory', "key['spike_other_markers_pdf_path']"], {}), "(directory, key['spike_other_markers_pdf_path'])\n", (43868, 43916), False, 'import os\n'), ((46455, 46504), 'os.path.join', 'os.path.join', (['directory', 'parent_directory', 'folder'], {}), '(directory, parent_directory, folder)\n', (46467, 46504), False, 'import os\n'), ((46620, 46686), 'os.path.join', 'os.path.join', (['parent_directory', 'folder', "(fpath + '_' + rec + '.png')"], {}), "(parent_directory, folder, fpath + '_' + rec + '.png')\n", (46632, 46686), False, 'import os\n'), ((51239, 51288), 'os.path.join', 'os.path.join', (['directory', 'parent_directory', 'folder'], {}), '(directory, parent_directory, folder)\n', (51251, 51288), False, 'import os\n'), ((51404, 51470), 'os.path.join', 'os.path.join', (['parent_directory', 'folder', "(fpath + '_' + rec + '.png')"], {}), "(parent_directory, folder, fpath + '_' + rec + '.png')\n", (51416, 51470), False, 'import os\n'), ((5010, 5030), 'pandas.isnull', 'pd.isnull', (["row['Rp']"], {}), "(row['Rp'])\n", (5019, 5030), True, 'import pandas as pd\n'), ((5076, 5096), 'pandas.isnull', 'pd.isnull', (["row['Cm']"], {}), "(row['Cm'])\n", (5085, 5096), True, 'import pandas as pd\n'), ((5146, 5166), 'pandas.isnull', 'pd.isnull', (["row['Ra']"], {}), "(row['Ra'])\n", (5155, 5166), True, 'import pandas as pd\n'), ((5216, 5239), 'pandas.isnull', 'pd.isnull', (["row['Vrest']"], {}), "(row['Vrest'])\n", (5225, 5239), True, 'import pandas as pd\n'), ((5292, 5315), 'pandas.isnull', 'pd.isnull', (["row['depth']"], {}), "(row['depth'])\n", (5301, 5315), True, 'import pandas as pd\n'), ((8161, 8184), 'pandas.isnull', 'pd.isnull', (["row['clamp']"], {}), "(row['clamp'])\n", (8170, 8184), True, 'import pandas as pd\n'), ((8244, 8270), 'pandas.isnull', 'pd.isnull', (["row['protocol']"], {}), "(row['protocol'])\n", (8253, 8270), True, 'import pandas as pd\n'), ((8328, 8350), 'pandas.isnull', 'pd.isnull', (["row['hold']"], {}), "(row['hold'])\n", (8337, 8350), True, 
'import pandas as pd\n'), ((8400, 8424), 'pandas.isnull', 'pd.isnull', (["row['Ra-pre']"], {}), "(row['Ra-pre'])\n", (8409, 8424), True, 'import pandas as pd\n'), ((8610, 8635), 'pandas.isnull', 'pd.isnull', (["row['Ra-post']"], {}), "(row['Ra-post'])\n", (8619, 8635), True, 'import pandas as pd\n'), ((8919, 8941), 'pandas.isnull', 'pd.isnull', (["row['gain']"], {}), "(row['gain'])\n", (8928, 8941), True, 'import pandas as pd\n'), ((8991, 9015), 'pandas.isnull', 'pd.isnull', (["row['filter']"], {}), "(row['filter'])\n", (9000, 9015), True, 'import pandas as pd\n'), ((9533, 9559), 'pandas.isnull', 'pd.isnull', (["row['response']"], {}), "(row['response'])\n", (9542, 9559), True, 'import pandas as pd\n'), ((20215, 20256), 'os.path.join', 'os.path.join', (['directory', 'parent_directory'], {}), '(directory, parent_directory)\n', (20227, 20256), False, 'import os\n'), ((20283, 20324), 'os.path.join', 'os.path.join', (['directory', 'parent_directory'], {}), '(directory, parent_directory)\n', (20295, 20324), False, 'import os\n'), ((20853, 20882), 'os.path.exists', 'os.path.exists', (['target_folder'], {}), '(target_folder)\n', (20867, 20882), False, 'import os\n'), ((20900, 20923), 'os.mkdir', 'os.mkdir', (['target_folder'], {}), '(target_folder)\n', (20908, 20923), False, 'import os\n'), ((25956, 25997), 'os.path.join', 'os.path.join', (['directory', 'parent_directory'], {}), '(directory, parent_directory)\n', (25968, 25997), False, 'import os\n'), ((26024, 26065), 'os.path.join', 'os.path.join', (['directory', 'parent_directory'], {}), '(directory, parent_directory)\n', (26036, 26065), False, 'import os\n'), ((28483, 28524), 'os.path.join', 'os.path.join', (['directory', 'parent_directory'], {}), '(directory, parent_directory)\n', (28495, 28524), False, 'import os\n'), ((28551, 28592), 'os.path.join', 'os.path.join', (['directory', 'parent_directory'], {}), '(directory, parent_directory)\n', (28563, 28592), False, 'import os\n'), ((30558, 30599), 'os.path.join', 
'os.path.join', (['directory', 'parent_directory'], {}), '(directory, parent_directory)\n', (30570, 30599), False, 'import os\n'), ((30626, 30667), 'os.path.join', 'os.path.join', (['directory', 'parent_directory'], {}), '(directory, parent_directory)\n', (30638, 30667), False, 'import os\n'), ((32646, 32687), 'os.path.join', 'os.path.join', (['directory', 'parent_directory'], {}), '(directory, parent_directory)\n', (32658, 32687), False, 'import os\n'), ((32714, 32755), 'os.path.join', 'os.path.join', (['directory', 'parent_directory'], {}), '(directory, parent_directory)\n', (32726, 32755), False, 'import os\n'), ((34888, 34929), 'os.path.join', 'os.path.join', (['directory', 'parent_directory'], {}), '(directory, parent_directory)\n', (34900, 34929), False, 'import os\n'), ((34956, 34997), 'os.path.join', 'os.path.join', (['directory', 'parent_directory'], {}), '(directory, parent_directory)\n', (34968, 34997), False, 'import os\n'), ((37196, 37237), 'os.path.join', 'os.path.join', (['directory', 'parent_directory'], {}), '(directory, parent_directory)\n', (37208, 37237), False, 'import os\n'), ((37264, 37305), 'os.path.join', 'os.path.join', (['directory', 'parent_directory'], {}), '(directory, parent_directory)\n', (37276, 37305), False, 'import os\n'), ((39620, 39661), 'os.path.join', 'os.path.join', (['directory', 'parent_directory'], {}), '(directory, parent_directory)\n', (39632, 39661), False, 'import os\n'), ((39688, 39729), 'os.path.join', 'os.path.join', (['directory', 'parent_directory'], {}), '(directory, parent_directory)\n', (39700, 39729), False, 'import os\n'), ((42177, 42218), 'os.path.join', 'os.path.join', (['directory', 'parent_directory'], {}), '(directory, parent_directory)\n', (42189, 42218), False, 'import os\n'), ((42245, 42286), 'os.path.join', 'os.path.join', (['directory', 'parent_directory'], {}), '(directory, parent_directory)\n', (42257, 42286), False, 'import os\n'), ((46524, 46553), 'os.path.exists', 'os.path.exists', 
(['target_folder'], {}), '(target_folder)\n', (46538, 46553), False, 'import os\n'), ((46571, 46594), 'os.mkdir', 'os.mkdir', (['target_folder'], {}), '(target_folder)\n', (46579, 46594), False, 'import os\n'), ((46709, 46744), 'os.path.join', 'os.path.join', (['directory', 'key[fpath]'], {}), '(directory, key[fpath])\n', (46721, 46744), False, 'import os\n'), ((49236, 49285), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'bot_large.size', '(255, 255, 255)'], {}), "('RGB', bot_large.size, (255, 255, 255))\n", (49245, 49285), False, 'from PIL import Image\n'), ((51308, 51337), 'os.path.exists', 'os.path.exists', (['target_folder'], {}), '(target_folder)\n', (51322, 51337), False, 'import os\n'), ((51355, 51378), 'os.mkdir', 'os.mkdir', (['target_folder'], {}), '(target_folder)\n', (51363, 51378), False, 'import os\n'), ((51493, 51528), 'os.path.join', 'os.path.join', (['directory', 'key[fpath]'], {}), '(directory, key[fpath])\n', (51505, 51528), False, 'import os\n'), ((1310, 1335), 'pandas.read_excel', 'pd.read_excel', (['excel_file'], {}), '(excel_file)\n', (1323, 1335), True, 'import pandas as pd\n'), ((5401, 5424), 'pandas.isnull', 'pd.isnull', (["row['fluor']"], {}), "(row['fluor'])\n", (5410, 5424), True, 'import pandas as pd\n'), ((5480, 5500), 'pandas.isnull', 'pd.isnull', (["row['Rm']"], {}), "(row['Rm'])\n", (5489, 5500), True, 'import pandas as pd\n'), ((5554, 5580), 'pandas.isnull', 'pd.isnull', (["row['external']"], {}), "(row['external'])\n", (5563, 5580), True, 'import pandas as pd\n'), ((5647, 5673), 'pandas.isnull', 'pd.isnull', (["row['internal']"], {}), "(row['internal'])\n", (5656, 5673), True, 'import pandas as pd\n'), ((5740, 5766), 'pandas.isnull', 'pd.isnull', (["row['location']"], {}), "(row['location'])\n", (5749, 5766), True, 'import pandas as pd\n'), ((5828, 5850), 'pandas.isnull', 'pd.isnull', (["row['fill']"], {}), "(row['fill'])\n", (5837, 5850), True, 'import pandas as pd\n'), ((45425, 45451), 'os.path.join', 'os.path.join', 
(['directory', 'x'], {}), '(directory, x)\n', (45437, 45451), False, 'import os\n'), ((45762, 45792), 'os.path.join', 'os.path.join', (['directory', 'istep'], {}), '(directory, istep)\n', (45774, 45792), False, 'import os\n'), ((48721, 48747), 'os.path.join', 'os.path.join', (['directory', 'x'], {}), '(directory, x)\n', (48733, 48747), False, 'import os\n'), ((48829, 48855), 'os.path.join', 'os.path.join', (['directory', 'x'], {}), '(directory, x)\n', (48841, 48855), False, 'import os\n'), ((50522, 50552), 'os.path.join', 'os.path.join', (['directory', 'istep'], {}), '(directory, istep)\n', (50534, 50552), False, 'import os\n')]
|
from pyformance.meters import Meter
from tests import TimedTestCase
class MeterTestCase(TimedTestCase):
    """Check Meter's 1/5/15-minute EWMA rates against precomputed values."""

    def setUp(self):
        super(MeterTestCase, self).setUp()
        self.meter = Meter(key="test_meter", clock=TimedTestCase.clock)

    def tearDown(self):
        super(MeterTestCase, self).tearDown()

    def _assert_decaying_rates(self, get_rate, expectations):
        """Mark 3 events, advance 5s, tick once, then verify each step.

        ``expectations`` is a sequence of ``(seconds_to_advance,
        expected_rate)`` pairs; the clock is advanced by the given amount
        before each rate is compared (0 means "check immediately").
        """
        self.meter.mark(3)
        self.clock.add(5)
        self.meter.tick()
        for advance, expected in expectations:
            if advance:
                self.clock.add(advance)
            self.assertAlmostEqual(expected, get_rate(), delta=0.000001)

    def test__one_minute_rate(self):
        # 0.6 events/sec after the first tick, then the EWMA decays to
        # 0.22072766 after 1 minute and 0.08120117 after 2 minutes.
        self._assert_decaying_rates(
            self.meter.get_one_minute_rate,
            [(0, 0.6), (60, 0.22072766), (60, 0.08120117)],
        )

    def test__five_minute_rate(self):
        # Slower decay constant: 0.49123845 after 1 min, 0.40219203 after 2.
        self._assert_decaying_rates(
            self.meter.get_five_minute_rate,
            [(0, 0.6), (60, 0.49123845), (60, 0.40219203)],
        )

    def test__fifteen_minute_rate(self):
        # Slowest decay: 0.56130419 after 1 min, 0.52510399 after 2.
        self._assert_decaying_rates(
            self.meter.get_fifteen_minute_rate,
            [(0, 0.6), (60, 0.56130419), (60, 0.52510399)],
        )

    def test__mean_rate(self):
        # 60 events over 60 seconds -> mean rate of exactly 1 event/sec.
        self.meter.mark(60)
        self.clock.add(60)
        self.meter.tick()
        self.assertEqual(1, self.meter.get_mean_rate())
|
[
"pyformance.meters.Meter"
] |
[((191, 241), 'pyformance.meters.Meter', 'Meter', ([], {'key': '"""test_meter"""', 'clock': 'TimedTestCase.clock'}), "(key='test_meter', clock=TimedTestCase.clock)\n", (196, 241), False, 'from pyformance.meters import Meter\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2021, <NAME> <@alinabuzachis>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
module: k8s_taint
short_description: Taint a node in a Kubernetes/OpenShift cluster
version_added: "2.3.0"
author: <NAME> (@alinabuzachis)
description:
- Taint allows a node to refuse Pod to be scheduled unless that Pod has a matching toleration.
- Untaint will remove taints from nodes as needed.
extends_documentation_fragment:
- kubernetes.core.k8s_auth_options
options:
state:
description:
- Determines whether to add or remove taints.
type: str
default: present
choices: [ present, absent ]
name:
description:
- The name of the node.
required: true
type: str
taints:
description:
- List containing the taints.
type: list
required: true
elements: dict
suboptions:
key:
description:
- The taint key to be applied to a node.
type: str
value:
description:
- The taint value corresponding to the taint key.
type: str
effect:
description:
- The effect of the taint on Pods that do not tolerate the taint.
- Required when I(state=present).
type: str
choices: [ NoSchedule, NoExecute, PreferNoSchedule ]
replace:
description:
- If C(true), allow taints to be replaced.
required: false
default: false
type: bool
requirements:
- python >= 3.6
- kubernetes >= 12.0.0
"""
EXAMPLES = r"""
- name: Taint node "foo"
kubernetes.core.k8s_taint:
state: present
name: foo
taints:
- effect: NoExecute
key: "key1"
- name: Taint node "foo"
kubernetes.core.k8s_taint:
state: present
name: foo
taints:
- effect: NoExecute
key: "key1"
value: "value1"
- effect: NoSchedule
key: "key1"
value: "value1"
- name: Remove taint from "foo".
kubernetes.core.k8s_taint:
state: absent
name: foo
taints:
- effect: NoExecute
key: "key1"
value: "value1"
"""
RETURN = r"""
result:
description:
- The tainted Node object. Will be empty in the case of a deletion.
returned: success
type: complex
contains:
api_version:
description: The versioned schema of this representation of an object.
returned: success
type: str
kind:
description: Represents the REST resource this object represents.
returned: success
type: str
metadata:
description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
returned: success
type: complex
spec:
description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
returned: success
type: complex
status:
description: Current status details for the object.
returned: success
type: complex
"""
import copy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible_collections.kubernetes.core.plugins.module_utils.common import (
K8sAnsibleMixin,
get_api_client,
)
from ansible_collections.kubernetes.core.plugins.module_utils.args_common import (
AUTH_ARG_SPEC,
)
try:
from kubernetes.client.api import core_v1_api
from kubernetes.client.exceptions import ApiException
except ImportError:
# ImportError are managed by the common module already.
pass
def _equal_dicts(a, b):
keys = ["key", "effect"]
if "effect" not in set(a).intersection(b):
keys.remove("effect")
return all((a[x] == b[x] for x in keys))
def _get_difference(a, b):
    """Return the taints from ``a`` that have no key/effect match in ``b``."""
    unmatched = []
    for candidate in a:
        if not any(_equal_dicts(candidate, existing) for existing in b):
            unmatched.append(candidate)
    return unmatched
def _get_intersection(a, b):
    """Return the taints from ``a`` that match (key/effect) some taint in ``b``."""
    matched = []
    for candidate in a:
        for existing in b:
            if _equal_dicts(candidate, existing):
                matched.append(candidate)
                break
    return matched
def _update_exists(a, b):
    """Return True when a matching taint pair differs only in ``value``.

    Two taints "match" per ``_equal_dicts`` (same key, and same effect when
    both define one); a differing ``value`` then means an update is needed.
    """
    for left in a:
        for right in b:
            if _equal_dicts(left, right) and left.get("value") != right.get("value"):
                return True
    return False
def argspec():
    """Build the module argument spec: common auth options plus taint options."""
    spec = copy.deepcopy(AUTH_ARG_SPEC)
    spec["state"] = dict(type="str", choices=["present", "absent"], default="present")
    spec["name"] = dict(type="str", required=True)
    spec["taints"] = dict(type="list", required=True, elements="dict")
    spec["replace"] = dict(type="bool", default=False)
    return spec
class K8sTaintAnsible:
    """Performs the add/remove taint workflow for the k8s_taint module."""
    def __init__(self, module):
        self.module = module
        # Wire the shared kubernetes.core mixin up to this AnsibleModule so
        # its helpers (fail/exit/warn, params, check_mode) behave as usual.
        self.k8s_ansible_mixin = K8sAnsibleMixin(module=self.module)
        self.k8s_ansible_mixin.client = get_api_client(module=self.module)
        self.k8s_ansible_mixin.module = self.module
        self.k8s_ansible_mixin.argspec = self.module.argument_spec
        self.k8s_ansible_mixin.check_mode = self.module.check_mode
        self.k8s_ansible_mixin.params = self.module.params
        self.k8s_ansible_mixin.fail_json = self.module.fail_json
        self.k8s_ansible_mixin.fail = self.module.fail_json
        self.k8s_ansible_mixin.exit_json = self.module.exit_json
        self.k8s_ansible_mixin.warn = self.module.warn
        self.k8s_ansible_mixin.warnings = []
        # Raw CoreV1 client used for the read_node/patch_node calls below.
        self.api_instance = core_v1_api.CoreV1Api(self.k8s_ansible_mixin.client.client)
        self.k8s_ansible_mixin.check_library_version()
        self.changed = False
    def get_node(self, name):
        """Fetch the Node object called *name*; fail the module on any error."""
        try:
            node = self.api_instance.read_node(name=name)
        except ApiException as exc:
            # Give "no such node" its own, clearer failure message.
            if exc.reason == "Not Found":
                self.module.fail_json(msg="Node '{0}' has not been found.".format(name))
            self.module.fail_json(
                msg="Failed to retrieve node '{0}' due to: {1}".format(
                    name, exc.reason
                ),
                status=exc.status,
            )
        except Exception as exc:
            self.module.fail_json(
                msg="Failed to retrieve node '{0}' due to: {1}".format(
                    name, to_native(exc)
                )
            )
        return node
    def patch_node(self, taints):
        """PATCH the node's spec.taints with *taints*; return the node as a dict."""
        body = {"spec": {"taints": taints}}
        try:
            result = self.api_instance.patch_node(
                name=self.module.params.get("name"), body=body
            )
        except Exception as exc:
            self.module.fail_json(
                msg="Failed to patch node due to: {0}".format(to_native(exc))
            )
        return result.to_dict()
    def execute_module(self):
        """Apply or remove the requested taints and exit the module."""
        result = {"result": {}}
        state = self.module.params.get("state")
        taints = self.module.params.get("taints")
        name = self.module.params.get("name")
        node = self.get_node(name)
        existing_taints = node.spec.to_dict().get("taints") or []
        # Requested taints that are not yet on the node (matched on key/effect).
        diff = _get_difference(taints, existing_taints)
        if state == "present":
            if diff:
                # There are new taints to be added
                self.changed = True
                if self.module.check_mode:
                    self.module.exit_json(changed=self.changed, **result)
                if self.module.params.get("replace"):
                    # Patch with the new taints
                    result["result"] = self.patch_node(taints=taints)
                    self.module.exit_json(changed=self.changed, **result)
                # Merge: keep existing taints not being overridden, add new ones.
                result["result"] = self.patch_node(
                    taints=[*_get_difference(existing_taints, taints), *taints]
                )
            else:
                # No new taints to be added, but maybe there is something to be updated
                if _update_exists(existing_taints, taints):
                    self.changed = True
                    if self.module.check_mode:
                        self.module.exit_json(changed=self.changed, **result)
                    result["result"] = self.patch_node(
                        taints=[*_get_difference(existing_taints, taints), *taints]
                    )
                else:
                    # Fully idempotent: report the node unchanged.
                    result["result"] = node.to_dict()
        elif state == "absent":
            # Nothing to be removed
            if not existing_taints:
                result["result"] = node.to_dict()
            if not diff:
                # Every requested taint exists on the node: remove them all.
                self.changed = True
                if self.module.check_mode:
                    self.module.exit_json(changed=self.changed, **result)
                self.patch_node(taints=_get_difference(existing_taints, taints))
            else:
                if _get_intersection(existing_taints, taints):
                    # Only a subset matches: remove just the matching taints.
                    self.changed = True
                    if self.module.check_mode:
                        self.module.exit_json(changed=self.changed, **result)
                    self.patch_node(taints=_get_difference(existing_taints, taints))
                else:
                    # Nothing matches: no-op exit.
                    self.module.exit_json(changed=self.changed, **result)
        # NOTE(review): in the "absent" branches patch_node()'s return value is
        # discarded, so result["result"] may stay empty — confirm this is intended.
        self.module.exit_json(changed=self.changed, **result)
def main():
    """Module entry point: build the argument spec and run the taint logic."""
    module = AnsibleModule(
        argument_spec=argspec(),
        supports_check_mode=True,
    )
    K8sTaintAnsible(module).execute_module()


if __name__ == "__main__":
    main()
|
[
"ansible.module_utils._text.to_native",
"copy.deepcopy",
"ansible_collections.kubernetes.core.plugins.module_utils.common.get_api_client",
"ansible_collections.kubernetes.core.plugins.module_utils.common.K8sAnsibleMixin",
"kubernetes.client.api.core_v1_api.CoreV1Api"
] |
[((4708, 4736), 'copy.deepcopy', 'copy.deepcopy', (['AUTH_ARG_SPEC'], {}), '(AUTH_ARG_SPEC)\n', (4721, 4736), False, 'import copy\n'), ((5198, 5233), 'ansible_collections.kubernetes.core.plugins.module_utils.common.K8sAnsibleMixin', 'K8sAnsibleMixin', ([], {'module': 'self.module'}), '(module=self.module)\n', (5213, 5233), False, 'from ansible_collections.kubernetes.core.plugins.module_utils.common import K8sAnsibleMixin, get_api_client\n'), ((5274, 5308), 'ansible_collections.kubernetes.core.plugins.module_utils.common.get_api_client', 'get_api_client', ([], {'module': 'self.module'}), '(module=self.module)\n', (5288, 5308), False, 'from ansible_collections.kubernetes.core.plugins.module_utils.common import K8sAnsibleMixin, get_api_client\n'), ((5872, 5931), 'kubernetes.client.api.core_v1_api.CoreV1Api', 'core_v1_api.CoreV1Api', (['self.k8s_ansible_mixin.client.client'], {}), '(self.k8s_ansible_mixin.client.client)\n', (5893, 5931), False, 'from kubernetes.client.api import core_v1_api\n'), ((6663, 6677), 'ansible.module_utils._text.to_native', 'to_native', (['exc'], {}), '(exc)\n', (6672, 6677), False, 'from ansible.module_utils._text import to_native\n'), ((7082, 7096), 'ansible.module_utils._text.to_native', 'to_native', (['exc'], {}), '(exc)\n', (7091, 7096), False, 'from ansible.module_utils._text import to_native\n')]
|
import os
from geo.data_dir_config import root
# Path configuration for the geo pipeline.  All paths hang off ``root``
# (imported from geo.data_dir_config); every output directory is created
# eagerly at import time.
test_submission = root + "submissions/submission_val.csv"

img_list_dir = root + "image_lists/"
train_samples = img_list_dir + "train/samples.npy"
train_samples_species_map = img_list_dir + "train/species_map.py"
test_samples = img_list_dir + "test/samples.npy"


def _ensure_dir(path):
    """Create *path* (and any missing parents) if it does not exist yet.

    Centralizes the repeated ``os.path.exists``/``os.makedirs`` pattern and
    fixes the original bug where ``image_lists/train/`` and
    ``image_lists/test/`` were only created when their parent directory was
    missing too — if ``image_lists/`` already existed, the sub-folders were
    never created.
    """
    if not os.path.exists(path):
        os.makedirs(path)


# xgb normal training paths
xgb_dir = root + "xgb/"
xgb_model = xgb_dir + "model"
xgb_model_dump = xgb_dir + "model_dump"
xgb_feature_importances = xgb_dir + "feature_importances.pdf"
_ensure_dir(xgb_dir)

submissions_dir = root + "submissions/"
_ensure_dir(submissions_dir)
vector_submission = submissions_dir + "vector_submission.csv"
xgb_multimodel_submission = submissions_dir + "xgb_multimodel_submission.csv"
xgb_multimodel_groups_submission = submissions_dir + "xgb_multimodel_groups_submission.csv"
xgb_singlemodel_submission = submissions_dir + "xgb_singlemodel_submission.csv"
random_submission = submissions_dir + "random_submission.csv"
probability_submission = submissions_dir + "probability_submission.csv"

# keras single model training paths
keras_training_dir = root + "keras_training_results/"
keras_training_gt = keras_training_dir + "gt.npy"
keras_training_results = keras_training_dir + "results.npy"
keras_training_species_map = keras_training_dir + "species_map.py"
keras_training_submission = keras_training_dir + "submission.csv"
keras_training_glc_ids = keras_training_dir + "glc_ids.npy"
keras_training_model = keras_training_dir + "model.h5"

# keras multi model training paths
keras_multi_model_training_dir = root + "keras_multi_model_training_results/"
keras_multi_model_training_gt = keras_multi_model_training_dir + "gt.npy"
keras_multi_model_training_results = keras_multi_model_training_dir + "results.npy"
keras_multi_model_training_species_map = keras_multi_model_training_dir + "species_map.py"
keras_multi_model_training_submission = keras_multi_model_training_dir + "submission.csv"
keras_multi_model_training_glc_ids = keras_multi_model_training_dir + "glc_ids.npy"
keras_multi_model_training_model1 = keras_multi_model_training_dir + "model1.h5"
keras_multi_model_training_model2 = keras_multi_model_training_dir + "model2.h5"
keras_multi_model_training_model3 = keras_multi_model_training_dir + "model3.h5"
keras_multi_model_training_model4 = keras_multi_model_training_dir + "model4.h5"
keras_multi_model_training_model5 = keras_multi_model_training_dir + "model5.h5"
keras_multi_model_training_model6 = keras_multi_model_training_dir + "model6.h5"

# keras single model test paths
keras_test_dir = root + "keras_predictions/"
keras_test_results = keras_test_dir + "results.npy"
keras_test_glc_ids = keras_test_dir + "glc_ids.npy"
keras_test_submission = keras_test_dir + "submission.csv"

# keras multi model test paths
keras_multi_model_test_dir = root + "keras_multi_model_predictions/"
keras_multi_model_test_results = keras_multi_model_test_dir + "results.npy"
keras_multi_model_test_glc_ids = keras_multi_model_test_dir + "glc_ids.npy"
keras_multi_model_test_submission = keras_multi_model_test_dir + "submission.csv"

# Ensure the image-list sub-folders individually; os.makedirs creates
# img_list_dir itself as a parent when needed.
_ensure_dir(img_list_dir + "train/")
_ensure_dir(img_list_dir + "test/")
_ensure_dir(keras_multi_model_training_dir)
_ensure_dir(keras_training_dir)
_ensure_dir(keras_test_dir)
_ensure_dir(keras_multi_model_test_dir)
|
[
"os.path.exists",
"os.makedirs"
] |
[((504, 527), 'os.path.exists', 'os.path.exists', (['xgb_dir'], {}), '(xgb_dir)\n', (518, 527), False, 'import os\n'), ((533, 553), 'os.makedirs', 'os.makedirs', (['xgb_dir'], {}), '(xgb_dir)\n', (544, 553), False, 'import os\n'), ((603, 634), 'os.path.exists', 'os.path.exists', (['submissions_dir'], {}), '(submissions_dir)\n', (617, 634), False, 'import os\n'), ((640, 668), 'os.makedirs', 'os.makedirs', (['submissions_dir'], {}), '(submissions_dir)\n', (651, 668), False, 'import os\n'), ((3167, 3195), 'os.path.exists', 'os.path.exists', (['img_list_dir'], {}), '(img_list_dir)\n', (3181, 3195), False, 'import os\n'), ((3201, 3226), 'os.makedirs', 'os.makedirs', (['img_list_dir'], {}), '(img_list_dir)\n', (3212, 3226), False, 'import os\n'), ((3231, 3267), 'os.makedirs', 'os.makedirs', (["(img_list_dir + 'train/')"], {}), "(img_list_dir + 'train/')\n", (3242, 3267), False, 'import os\n'), ((3270, 3305), 'os.makedirs', 'os.makedirs', (["(img_list_dir + 'test/')"], {}), "(img_list_dir + 'test/')\n", (3281, 3305), False, 'import os\n'), ((3312, 3358), 'os.path.exists', 'os.path.exists', (['keras_multi_model_training_dir'], {}), '(keras_multi_model_training_dir)\n', (3326, 3358), False, 'import os\n'), ((3364, 3407), 'os.makedirs', 'os.makedirs', (['keras_multi_model_training_dir'], {}), '(keras_multi_model_training_dir)\n', (3375, 3407), False, 'import os\n'), ((3416, 3450), 'os.path.exists', 'os.path.exists', (['keras_training_dir'], {}), '(keras_training_dir)\n', (3430, 3450), False, 'import os\n'), ((3456, 3487), 'os.makedirs', 'os.makedirs', (['keras_training_dir'], {}), '(keras_training_dir)\n', (3467, 3487), False, 'import os\n'), ((3496, 3526), 'os.path.exists', 'os.path.exists', (['keras_test_dir'], {}), '(keras_test_dir)\n', (3510, 3526), False, 'import os\n'), ((3532, 3559), 'os.makedirs', 'os.makedirs', (['keras_test_dir'], {}), '(keras_test_dir)\n', (3543, 3559), False, 'import os\n'), ((3568, 3610), 'os.path.exists', 'os.path.exists', 
(['keras_multi_model_test_dir'], {}), '(keras_multi_model_test_dir)\n', (3582, 3610), False, 'import os\n'), ((3616, 3655), 'os.makedirs', 'os.makedirs', (['keras_multi_model_test_dir'], {}), '(keras_multi_model_test_dir)\n', (3627, 3655), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-06-19 20:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the optional ``hero_cta_link`` page chooser to ``homepage``.

    Auto-generated by Django 1.11 ``makemigrations``.  The FK is nullable
    and uses SET_NULL, so deleting the linked wagtail Page clears the link
    instead of deleting the home page.
    """
    dependencies = [
        ('wagtailcore', '0040_page_draft_title'),
        ('cms', '0005_auto_20180619_1525'),
    ]
    operations = [
        migrations.AddField(
            model_name='homepage',
            name='hero_cta_link',
            field=models.ForeignKey(blank=True, help_text='Choose a page to link to for the Call to Action', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.Page', verbose_name='Hero CTA link'),
        ),
    ]
|
[
"django.db.models.ForeignKey"
] |
[((488, 721), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'help_text': '"""Choose a page to link to for the Call to Action"""', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""+"""', 'to': '"""wagtailcore.Page"""', 'verbose_name': '"""Hero CTA link"""'}), "(blank=True, help_text=\n 'Choose a page to link to for the Call to Action', null=True, on_delete\n =django.db.models.deletion.SET_NULL, related_name='+', to=\n 'wagtailcore.Page', verbose_name='Hero CTA link')\n", (505, 721), False, 'from django.db import migrations, models\n')]
|
# coding: utf-8
"""
CLOUD API
IONOS Enterprise-grade Infrastructure as a Service (IaaS) solutions can be managed through the Cloud API, in addition or as an alternative to the \"Data Center Designer\" (DCD) browser-based tool. Both methods employ consistent concepts and features, deliver similar power and flexibility, and can be used to perform a multitude of management tasks, including adding servers, volumes, configuring networks, and so on. # noqa: E501
The version of the OpenAPI document: 6.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from ionoscloud.configuration import Configuration
class UserPropertiesPut(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'firstname': 'str',
        'lastname': 'str',
        'email': 'str',
        # BUGFIX: this entry previously held the scrubbed placeholder
        # '<PASSWORD>', which is not a valid OpenAPI type name and breaks any
        # code that walks openapi_types for (de)serialization.  The API
        # declares the password property as a plain string.
        'password': 'str',
        'administrator': 'bool',
        'force_sec_auth': 'bool',
        'sec_auth_active': 'bool',
        'active': 'bool',
    }

    attribute_map = {
        'firstname': 'firstname',
        'lastname': 'lastname',
        'email': 'email',
        'password': 'password',
        'administrator': 'administrator',
        'force_sec_auth': 'forceSecAuth',
        'sec_auth_active': 'secAuthActive',
        'active': 'active',
    }

    def __init__(self, firstname=None, lastname=None, email=None, password=None, administrator=None, force_sec_auth=None, sec_auth_active=None, active=None, local_vars_configuration=None):  # noqa: E501
        """UserPropertiesPut - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._firstname = None
        self._lastname = None
        self._email = None
        self._password = None
        self._administrator = None
        self._force_sec_auth = None
        self._sec_auth_active = None
        self._active = None
        self.discriminator = None

        if firstname is not None:
            self.firstname = firstname
        if lastname is not None:
            self.lastname = lastname
        if email is not None:
            self.email = email
        if password is not None:
            self.password = password
        if administrator is not None:
            self.administrator = administrator
        if force_sec_auth is not None:
            self.force_sec_auth = force_sec_auth
        if sec_auth_active is not None:
            self.sec_auth_active = sec_auth_active
        if active is not None:
            self.active = active

    @property
    def firstname(self):
        """Gets the firstname of this UserPropertiesPut.  # noqa: E501

        The first name of the user.  # noqa: E501

        :return: The firstname of this UserPropertiesPut.  # noqa: E501
        :rtype: str
        """
        return self._firstname

    @firstname.setter
    def firstname(self, firstname):
        """Sets the firstname of this UserPropertiesPut.

        The first name of the user.  # noqa: E501

        :param firstname: The firstname of this UserPropertiesPut.  # noqa: E501
        :type firstname: str
        """
        self._firstname = firstname

    @property
    def lastname(self):
        """Gets the lastname of this UserPropertiesPut.  # noqa: E501

        The last name of the user.  # noqa: E501

        :return: The lastname of this UserPropertiesPut.  # noqa: E501
        :rtype: str
        """
        return self._lastname

    @lastname.setter
    def lastname(self, lastname):
        """Sets the lastname of this UserPropertiesPut.

        The last name of the user.  # noqa: E501

        :param lastname: The lastname of this UserPropertiesPut.  # noqa: E501
        :type lastname: str
        """
        self._lastname = lastname

    @property
    def email(self):
        """Gets the email of this UserPropertiesPut.  # noqa: E501

        The email address of the user.  # noqa: E501

        :return: The email of this UserPropertiesPut.  # noqa: E501
        :rtype: str
        """
        return self._email

    @email.setter
    def email(self, email):
        """Sets the email of this UserPropertiesPut.

        The email address of the user.  # noqa: E501

        :param email: The email of this UserPropertiesPut.  # noqa: E501
        :type email: str
        """
        self._email = email

    @property
    def password(self):
        """Gets the password of this UserPropertiesPut.  # noqa: E501

        password of the user  # noqa: E501

        :return: The password of this UserPropertiesPut.  # noqa: E501
        :rtype: str
        """
        return self._password

    @password.setter
    def password(self, password):
        """Sets the password of this UserPropertiesPut.

        password of the user  # noqa: E501

        :param password: The password of this UserPropertiesPut.  # noqa: E501
        :type password: str
        """
        self._password = password

    @property
    def administrator(self):
        """Gets the administrator of this UserPropertiesPut.  # noqa: E501

        Indicates if the user has admin rights.  # noqa: E501

        :return: The administrator of this UserPropertiesPut.  # noqa: E501
        :rtype: bool
        """
        return self._administrator

    @administrator.setter
    def administrator(self, administrator):
        """Sets the administrator of this UserPropertiesPut.

        Indicates if the user has admin rights.  # noqa: E501

        :param administrator: The administrator of this UserPropertiesPut.  # noqa: E501
        :type administrator: bool
        """
        self._administrator = administrator

    @property
    def force_sec_auth(self):
        """Gets the force_sec_auth of this UserPropertiesPut.  # noqa: E501

        Indicates if secure authentication should be forced on the user.  # noqa: E501

        :return: The force_sec_auth of this UserPropertiesPut.  # noqa: E501
        :rtype: bool
        """
        return self._force_sec_auth

    @force_sec_auth.setter
    def force_sec_auth(self, force_sec_auth):
        """Sets the force_sec_auth of this UserPropertiesPut.

        Indicates if secure authentication should be forced on the user.  # noqa: E501

        :param force_sec_auth: The force_sec_auth of this UserPropertiesPut.  # noqa: E501
        :type force_sec_auth: bool
        """
        self._force_sec_auth = force_sec_auth

    @property
    def sec_auth_active(self):
        """Gets the sec_auth_active of this UserPropertiesPut.  # noqa: E501

        Indicates if secure authentication is active for the user.  # noqa: E501

        :return: The sec_auth_active of this UserPropertiesPut.  # noqa: E501
        :rtype: bool
        """
        return self._sec_auth_active

    @sec_auth_active.setter
    def sec_auth_active(self, sec_auth_active):
        """Sets the sec_auth_active of this UserPropertiesPut.

        Indicates if secure authentication is active for the user.  # noqa: E501

        :param sec_auth_active: The sec_auth_active of this UserPropertiesPut.  # noqa: E501
        :type sec_auth_active: bool
        """
        self._sec_auth_active = sec_auth_active

    @property
    def active(self):
        """Gets the active of this UserPropertiesPut.  # noqa: E501

        Indicates if the user is active.  # noqa: E501

        :return: The active of this UserPropertiesPut.  # noqa: E501
        :rtype: bool
        """
        return self._active

    @active.setter
    def active(self, active):
        """Sets the active of this UserPropertiesPut.

        Indicates if the user is active.  # noqa: E501

        :param active: The active of this UserPropertiesPut.  # noqa: E501
        :type active: bool
        """
        self._active = active

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, UserPropertiesPut):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, UserPropertiesPut):
            return True

        return self.to_dict() != other.to_dict()
|
[
"six.iteritems",
"ionoscloud.configuration.Configuration"
] |
[((8430, 8463), 'six.iteritems', 'six.iteritems', (['self.openapi_types'], {}), '(self.openapi_types)\n', (8443, 8463), False, 'import six\n'), ((2090, 2105), 'ionoscloud.configuration.Configuration', 'Configuration', ([], {}), '()\n', (2103, 2105), False, 'from ionoscloud.configuration import Configuration\n')]
|
##############################################################################
#
# Copyright (c) 2001 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Caching tool implementation.
"""
from AccessControl.class_init import InitializeClass
from AccessControl.SecurityInfo import ClassSecurityInfo
from App.Common import rfc1123_date
from App.special_dtml import DTMLFile
from DateTime.DateTime import DateTime
from OFS.Cache import ZCM_MANAGERS
from OFS.Cache import Cache
from OFS.Cache import CacheManager
from OFS.Cache import getVerifiedManagerIds
from OFS.interfaces import IObjectWillBeMovedEvent
from OFS.SimpleItem import SimpleItem
from Persistence import PersistentMapping
from Products.PageTemplates.Expressions import SecureModuleImporter
from Products.PageTemplates.Expressions import getEngine
from zope.component import getUtility
from zope.container.interfaces import IObjectMovedEvent
from zope.interface import implementer
from .Expression import Expression
from .interfaces import ICachingPolicy
from .interfaces import ICachingPolicyManager
from .interfaces import IMembershipTool
from .permissions import ManagePortal
from .permissions import View
from .utils import _dtmldir
from .utils import _setCacheHeaders
from .utils import _ViewEmulator
from .utils import registerToolInterface
# This is lame :(
# This listing is used to decide whether to wrap an object inside a "fake view"
# for the OFS.Cache caching. If it is a view type, no fake view wrap is needed.
# It is matched against ``ob.meta_type`` in CPMCache.ZCache_set below.
VIEW_METATYPES = ('Page Template', 'DTML Method', 'DTML Document',
                  'Filesystem DTML Method', 'Filesystem Page Template')
def createCPContext(content, view_method, keywords, time=None):
    """Build a TALES expression context for CachingPolicy evaluation.

    Exposes the rendered object, view name, request keywords, the
    authenticated member (or None for anonymous), and the current time.
    """
    mtool = getUtility(IMembershipTool)
    member = None if mtool.isAnonymousUser() else mtool.getAuthenticatedMember()
    if time is None:
        time = DateTime()
    # The name "content" is deprecated and will go away in CMF 2.0,
    # please use "object" in your policy.
    names = {
        'content': content,
        'object': content,
        'view': view_method,
        'keywords': keywords,
        'request': getattr(content, 'REQUEST', {}),
        'member': member,
        'modules': SecureModuleImporter,
        'nothing': None,
        'time': time,
    }
    return getEngine().getContext(names)
class CPMCache(Cache):
    """ Simple OFS.Cache-implementation

    A no-op cache: it never stores or returns cached data.  It exists so
    that ZCache_set -- called by Zope's caching machinery when a cacheable
    object is rendered -- can trigger _setCacheHeaders for the
    CachingPolicyManager.
    """
    security = ClassSecurityInfo()
    @security.private
    def ZCache_invalidate(self, ob):
        """ An object is forced out of the cache

        This implementation stores nothing and does not attempt to
        communicate with cache servers, so this is a no-op.
        """
        pass
    @security.private
    def ZCache_get(self, ob, view_name, keywords, mtime_func, default):
        """ An object is retrieved from the cache

        This implementation stores nothing - a no-op.
        """
        pass
    @security.private
    def ZCache_set(self, ob, data, view_name, keywords, mtime_func):
        """ An object is pushed into the cache

        Even though this cache implementation does not cache anything per se,
        this method is used as a suitable hook to activate the real heavy
        lifting done by the CachePolicyManager.
        """
        # Non-view objects are wrapped in a _ViewEmulator so _setCacheHeaders
        # always operates on something view-shaped (see VIEW_METATYPES above).
        if ob.meta_type not in VIEW_METATYPES:
            ob = _ViewEmulator().__of__(ob)
        return _setCacheHeaders(ob, extra_context={})
InitializeClass(CPMCache)  # register the ClassSecurityInfo declarations
@implementer(ICachingPolicy)
class CachingPolicy:
"""
Represent a single class of cachable objects:
- class membership is defined by 'predicate', a TALES expression
with access to the following top-level names:
'object' -- the object itself
'view' -- the name of the view method
'keywords' -- keywords passed to the request
'request' -- the REQUEST object itself
'member' -- the authenticated member, or None if anonymous
'modules' -- usual TALES access-with-import
'nothing' -- None
'time' -- A DateTime object for the current date and time
- mtime_func is used to set the "Last-modified" HTTP response
header, which is another TALES expression evaluated
against the same namespace. If not specified explicitly,
uses 'object/modified'. mtime_func is also used in responding
to conditional GETs.
- The "Expires" HTTP response header and the "max-age" token of
the "Cache-control" header will be set using 'max_age_secs',
if passed; it should be an integer value in seconds.
- The "s-maxage" token of the "Cache-control" header will be
set using 's_max_age_secs', if passed; it should be an integer
value in seconds.
- The "Vary" HTTP response headers will be set if a value is
provided. The Vary header is described in RFC 2616. In essence,
it instructs caches that respect this header (such as Squid
after version 2.4) to distinguish between requests not just by
the request URL, but also by values found in the headers showing
in the Vary tag. "Vary: Cookie" would force Squid to also take
Cookie headers into account when deciding what cached object to
choose and serve in response to a request.
- The "ETag" HTTP response header will be set if a value is
provided. The value is a TALES expression and the result
after evaluation will be used as the ETag header value.
- Other tokens will be added to the "Cache-control" HTTP response
header as follows:
'no_cache=1' argument => "no-cache" token
'no_store=1' argument => "no-store" token
'must_revalidate=1' argument => "must-revalidate" token
'proxy_revalidate=1' argument => "proxy-revalidate" token
'public=1' argument => "public" token
'private=1' argument => "private" token
'no_transform=1' argument => "no-transform" token
- The last_modified argument is used to determine whether to add a
Last-Modified header. last_modified=1 by default. There appears
to be a bug in IE 6 (and possibly other versions) that uses the
Last-Modified header plus some heuristics rather than the other
explicit caching headers to determine whether to render content
from the cache. If you set, say, max-age=0, must-revalidate and
have a Last-Modified header some time in the past, IE will
recognize that the page in cache is stale and will request an
update from the server BUT if you have a Last-Modified header
with an older date, will then ignore the update and render from
the cache, so you may want to disable the Last-Modified header
when controlling caching using Cache-Control headers.
- The pre-check and post-check Cache-Control tokens are Microsoft
proprietary tokens added to IE 5+. Documentation can be found
here: http://msdn.microsoft.com/workshop/author/perf/perftips.asp
Unfortunately these are needed to make IE behave correctly.
"""
def __init__(self,
policy_id,
predicate='',
mtime_func='',
max_age_secs=None,
no_cache=0,
no_store=0,
must_revalidate=0,
vary='',
etag_func='',
s_max_age_secs=None,
proxy_revalidate=0,
public=0,
private=0,
no_transform=0,
enable_304s=0,
last_modified=1,
pre_check=None,
post_check=None):
if not predicate:
predicate = 'python:1'
if not mtime_func:
mtime_func = 'object/modified'
if max_age_secs is not None:
if str(max_age_secs).strip() == '':
max_age_secs = None
else:
max_age_secs = int(max_age_secs)
if s_max_age_secs is not None:
if str(s_max_age_secs).strip() == '':
s_max_age_secs = None
else:
s_max_age_secs = int(s_max_age_secs)
if pre_check is not None:
if str(pre_check).strip() == '':
pre_check = None
else:
pre_check = int(pre_check)
if post_check is not None:
if str(post_check).strip() == '':
post_check = None
else:
post_check = int(post_check)
self._policy_id = policy_id
self._predicate = Expression(text=predicate)
self._mtime_func = Expression(text=mtime_func)
self._max_age_secs = max_age_secs
self._s_max_age_secs = s_max_age_secs
self._no_cache = int(no_cache)
self._no_store = int(no_store)
self._must_revalidate = int(must_revalidate)
self._proxy_revalidate = int(proxy_revalidate)
self._public = int(public)
self._private = int(private)
self._no_transform = int(no_transform)
self._vary = vary
self._etag_func = Expression(text=etag_func)
self._enable_304s = int(enable_304s)
self._last_modified = int(last_modified)
self._pre_check = pre_check
self._post_check = post_check
    def getPolicyId(self):
        """Return the ID of this caching policy."""
        return self._policy_id

    def getPredicate(self):
        """Return the TALES predicate expression as text."""
        return self._predicate.text

    def getMTimeFunc(self):
        """Return the TALES modification-time expression as text."""
        return self._mtime_func.text

    def getMaxAgeSecs(self):
        """Return the 'max-age' value in seconds, or None if unset."""
        return self._max_age_secs

    def getSMaxAgeSecs(self):
        """Return the 's-maxage' value in seconds, or None if unset."""
        # getattr default: older persisted instances may lack the attribute.
        return getattr(self, '_s_max_age_secs', None)

    def getNoCache(self):
        """Return 1 if the 'no-cache' token should be emitted, else 0."""
        return self._no_cache

    def getNoStore(self):
        """Return 1 if the 'no-store' token should be emitted, else 0."""
        return self._no_store

    def getMustRevalidate(self):
        """Return 1 if the 'must-revalidate' token should be emitted."""
        return self._must_revalidate

    def getProxyRevalidate(self):
        """Return 1 if the 'proxy-revalidate' token should be emitted."""
        # getattr default: older persisted instances may lack the attribute.
        return getattr(self, '_proxy_revalidate', 0)

    def getPublic(self):
        """Return 1 if the 'public' token should be emitted, else 0."""
        return getattr(self, '_public', 0)

    def getPrivate(self):
        """Return 1 if the 'private' token should be emitted, else 0."""
        return getattr(self, '_private', 0)

    def getNoTransform(self):
        """Return 1 if the 'no-transform' token should be emitted."""
        return getattr(self, '_no_transform', 0)

    def getVary(self):
        """Return the value for the 'Vary' header ('' means no header)."""
        return getattr(self, '_vary', '')

    def getETagFunc(self):
        """Return the TALES ETag expression as text ('' if unset)."""
        etag_func_text = ''
        etag_func = getattr(self, '_etag_func', None)
        if etag_func is not None:
            etag_func_text = etag_func.text
        return etag_func_text

    def getEnable304s(self):
        """Return 1 if 304 (Not Modified) responses are enabled."""
        return getattr(self, '_enable_304s', 0)

    def getLastModified(self):
        """Should we set the last modified header?"""
        return getattr(self, '_last_modified', 1)

    def getPreCheck(self):
        """Return the IE-specific 'pre-check' value, or None if unset."""
        return getattr(self, '_pre_check', None)

    def getPostCheck(self):
        """Return the IE-specific 'post-check' value, or None if unset."""
        return getattr(self, '_post_check', None)
    def testPredicate(self, expr_context):
        """ Does this request match our predicate?"""
        return self._predicate(expr_context)

    def getHeaders(self, expr_context):
        """
        Does this request match our predicate?  If so, return a
        sequence of caching headers as ( key, value ) tuples.
        Otherwise, return an empty sequence.
        """
        headers = []
        if self.testPredicate(expr_context):
            if self.getLastModified():
                mtime = self._mtime_func(expr_context)
                if isinstance(mtime, str):
                    mtime = DateTime(mtime)
                if mtime is not None:
                    mtime_str = rfc1123_date(mtime.timeTime())
                    headers.append(('Last-modified', mtime_str))
            # Collect Cache-Control tokens; they are joined into a single
            # header at the end.
            control = []
            if self.getMaxAgeSecs() is not None:
                # 'time' is supplied by the expression context (current time).
                now = expr_context.vars['time']
                exp_time_str = rfc1123_date(now.timeTime()
                                            + self._max_age_secs)
                headers.append(('Expires', exp_time_str))
                control.append('max-age=%d' % self._max_age_secs)
            if self.getSMaxAgeSecs() is not None:
                control.append('s-maxage=%d' % self._s_max_age_secs)
            if self.getNoCache():
                control.append('no-cache')
                # The following is for HTTP 1.0 clients
                headers.append(('Pragma', 'no-cache'))
            if self.getNoStore():
                control.append('no-store')
            if self.getPublic():
                control.append('public')
            if self.getPrivate():
                control.append('private')
            if self.getMustRevalidate():
                control.append('must-revalidate')
            if self.getProxyRevalidate():
                control.append('proxy-revalidate')
            if self.getNoTransform():
                control.append('no-transform')
            pre_check = self.getPreCheck()
            if pre_check is not None:
                control.append('pre-check=%d' % pre_check)
            post_check = self.getPostCheck()
            if post_check is not None:
                control.append('post-check=%d' % post_check)
            if control:
                headers.append(('Cache-control', ', '.join(control)))
            if self.getVary():
                headers.append(('Vary', self._vary))
            if self.getETagFunc():
                headers.append(('ETag', self._etag_func(expr_context)))
        return headers
@implementer(ICachingPolicyManager)
class CachingPolicyManager(SimpleItem, CacheManager):
    """
    Manage the set of CachingPolicy objects for the site; dispatch
    to them from skin methods.
    """

    # Fixed tool id and ZMI presentation metadata.
    id = 'caching_policy_manager'
    meta_type = 'CMF Caching Policy Manager'
    zmi_icon = 'fa fa-rocket'
    _isCacheManager = 1  # Dead chicken. Yum.

    security = ClassSecurityInfo()

    def __init__(self):
        # Ordered tuple of policy ids plus a persistent id -> policy mapping.
        self._policy_ids = ()
        self._policies = PersistentMapping()

    #
    #   ZMI
    #
    manage_options = (
        ({'label': 'Policies', 'action': 'manage_cachingPolicies',
          'help': ('CMFCore', 'CPMPolicies.stx')},) +
        CacheManager.manage_options +
        SimpleItem.manage_options)

    security.declareProtected(ManagePortal,  # NOQA: flake8: D001
                              'manage_cachingPolicies')
    manage_cachingPolicies = DTMLFile('cachingPolicies', _dtmldir)
@security.public
def listPolicies(self):
"""List '(id, (policy, typeObjectName))' tuples for all policies.
"""
return tuple([(id, self._policies[id]) for id in self._policy_ids])
@security.protected(ManagePortal)
def addPolicy(self,
policy_id,
predicate, # TALES expr (def. 'python:1')
mtime_func, # TALES expr (def. 'object/modified')
max_age_secs, # integer, seconds (def. 0)
no_cache, # boolean (def. 0)
no_store, # boolean (def. 0)
must_revalidate, # boolean (def. 0)
vary, # string value
etag_func, # TALES expr (def. '')
REQUEST=None,
s_max_age_secs=None, # integer, seconds (def. None)
proxy_revalidate=0, # boolean (def. 0)
public=0, # boolean (def. 0)
private=0, # boolean (def. 0)
no_transform=0, # boolean (def. 0)
enable_304s=0, # boolean (def. 0)
last_modified=1, # boolean (def. 1)
pre_check=None, # integer, default None
post_check=None): # integer, default None
"""
Add a caching policy.
"""
if max_age_secs is None or str(max_age_secs).strip() == '':
max_age_secs = None
else:
max_age_secs = int(max_age_secs)
if s_max_age_secs is None or str(s_max_age_secs).strip() == '':
s_max_age_secs = None
else:
s_max_age_secs = int(s_max_age_secs)
if pre_check is None or str(pre_check).strip() == '':
pre_check = None
else:
pre_check = int(pre_check)
if post_check is None or str(post_check).strip() == '':
post_check = None
else:
post_check = int(post_check)
self._addPolicy(policy_id,
predicate,
mtime_func,
max_age_secs,
no_cache,
no_store,
must_revalidate,
vary,
etag_func,
s_max_age_secs,
proxy_revalidate,
public,
private,
no_transform,
enable_304s,
last_modified,
pre_check,
post_check)
if REQUEST is not None:
REQUEST['RESPONSE'].redirect(self.absolute_url()
+ '/manage_cachingPolicies'
+ '?manage_tabs_message='
+ 'Policy+added.')
@security.protected(ManagePortal)
def updatePolicy(self,
policy_id,
predicate, # TALES expr (def. 'python:1')
mtime_func, # TALES expr (def. 'object/modified')
max_age_secs, # integer, seconds (def. 0)
no_cache, # boolean (def. 0)
no_store, # boolean (def. 0)
must_revalidate, # boolean (def. 0)
vary, # string value
etag_func, # TALES expr (def. '')
REQUEST=None,
s_max_age_secs=None, # integer, seconds (def. 0)
proxy_revalidate=0, # boolean (def. 0)
public=0, # boolean (def. 0)
private=0, # boolean (def. 0)
no_transform=0, # boolean (def. 0)
enable_304s=0, # boolean (def. 0)
last_modified=1, # boolean (def. 1)
pre_check=0, # integer, default=None
post_check=0): # integer, default=None
"""
Update a caching policy.
"""
if max_age_secs is None or str(max_age_secs).strip() == '':
max_age_secs = None
else:
max_age_secs = int(max_age_secs)
if s_max_age_secs is None or str(s_max_age_secs).strip() == '':
s_max_age_secs = None
else:
s_max_age_secs = int(s_max_age_secs)
if pre_check is None or str(pre_check).strip() == '':
pre_check = None
else:
pre_check = int(pre_check)
if post_check is None or str(post_check).strip() == '':
post_check = None
else:
post_check = int(post_check)
self._updatePolicy(policy_id,
predicate,
mtime_func,
max_age_secs,
no_cache,
no_store,
must_revalidate,
vary,
etag_func,
s_max_age_secs,
proxy_revalidate,
public,
private,
no_transform,
enable_304s,
last_modified,
pre_check,
post_check)
if REQUEST is not None:
REQUEST['RESPONSE'].redirect(self.absolute_url()
+ '/manage_cachingPolicies'
+ '?manage_tabs_message='
+ 'Policy+updated.')
@security.protected(ManagePortal)
def movePolicyUp(self, policy_id, REQUEST=None):
"""
Move a caching policy up in the list.
"""
policy_ids = list(self._policy_ids)
ndx = policy_ids.index(policy_id)
if ndx == 0:
msg = 'Policy+already+first.'
else:
self._reorderPolicy(policy_id, ndx - 1)
msg = 'Policy+moved.'
if REQUEST is not None:
REQUEST['RESPONSE'].redirect(self.absolute_url()
+ '/manage_cachingPolicies'
+ '?manage_tabs_message=%s' % msg)
@security.protected(ManagePortal)
def movePolicyDown(self, policy_id, REQUEST=None):
"""
Move a caching policy down in the list.
"""
policy_ids = list(self._policy_ids)
ndx = policy_ids.index(policy_id)
if ndx == len(policy_ids) - 1:
msg = 'Policy+already+last.'
else:
self._reorderPolicy(policy_id, ndx + 1)
msg = 'Policy+moved.'
if REQUEST is not None:
REQUEST['RESPONSE'].redirect(self.absolute_url()
+ '/manage_cachingPolicies'
+ '?manage_tabs_message=%s' % msg)
@security.protected(ManagePortal)
def removePolicy(self, policy_id, REQUEST=None):
"""
Remove a caching policy.
"""
self._removePolicy(policy_id)
if REQUEST is not None:
pth = '/manage_cachingPolicies?manage_tabs_message=Policy+removed.'
REQUEST['RESPONSE'].redirect('%s%s' % (self.absolute_url(), pth))
#
# Policy manipulation methods.
#
@security.private
def _addPolicy(self,
policy_id,
predicate,
mtime_func,
max_age_secs,
no_cache,
no_store,
must_revalidate,
vary,
etag_func,
s_max_age_secs=None,
proxy_revalidate=0,
public=0,
private=0,
no_transform=0,
enable_304s=0,
last_modified=1,
pre_check=None,
post_check=None):
"""
Add a policy to our registry.
"""
policy_id = str(policy_id).strip()
if not policy_id:
raise ValueError('Policy ID is required!')
if policy_id in self._policy_ids:
raise KeyError('Policy %s already exists!' % policy_id)
self._policies[policy_id] = CachingPolicy(policy_id,
predicate,
mtime_func,
max_age_secs,
no_cache,
no_store,
must_revalidate,
vary,
etag_func,
s_max_age_secs,
proxy_revalidate,
public,
private,
no_transform,
enable_304s,
last_modified,
pre_check,
post_check)
idlist = list(self._policy_ids)
idlist.append(policy_id)
self._policy_ids = tuple(idlist)
@security.private
def _updatePolicy(self,
policy_id,
predicate,
mtime_func,
max_age_secs,
no_cache,
no_store,
must_revalidate,
vary,
etag_func,
s_max_age_secs=None,
proxy_revalidate=0,
public=0,
private=0,
no_transform=0,
enable_304s=0,
last_modified=1,
pre_check=None,
post_check=None):
"""
Update a policy in our registry.
"""
if policy_id not in self._policy_ids:
raise KeyError('Policy %s does not exist!' % policy_id)
self._policies[policy_id] = CachingPolicy(policy_id,
predicate,
mtime_func,
max_age_secs,
no_cache,
no_store,
must_revalidate,
vary,
etag_func,
s_max_age_secs,
proxy_revalidate,
public,
private,
no_transform,
enable_304s,
last_modified,
pre_check,
post_check)
@security.private
def _reorderPolicy(self, policy_id, newIndex):
"""
Reorder a policy in our registry.
"""
if policy_id not in self._policy_ids:
raise KeyError('Policy %s does not exist!' % policy_id)
idlist = list(self._policy_ids)
ndx = idlist.index(policy_id)
pred = idlist[ndx]
idlist = idlist[:ndx] + idlist[ndx + 1:]
idlist.insert(newIndex, pred)
self._policy_ids = tuple(idlist)
@security.private
def _removePolicy(self, policy_id):
"""
Remove a policy from our registry.
"""
if policy_id not in self._policy_ids:
raise KeyError('Policy %s does not exist!' % policy_id)
del self._policies[policy_id]
idlist = list(self._policy_ids)
ndx = idlist.index(policy_id)
idlist = idlist[:ndx] + idlist[ndx + 1:]
self._policy_ids = tuple(idlist)
#
# 'portal_caching' interface methods
#
    @security.protected(View)
    def getHTTPCachingHeaders(self, content, view_method, keywords, time=None):
        """
        Return a list of HTTP caching headers based on 'content',
        'view_method', and 'keywords'.

        Policies are consulted in order; the first one whose predicate
        matches supplies the headers.  Returns () when none match.
        """
        context = createCPContext(content, view_method, keywords, time=time)
        for _policy_id, policy in self.listPolicies():
            headers = policy.getHeaders(context)
            if headers:
                return headers
        return ()
    @security.protected(View)
    def getModTimeAndETag(self, content, view_method, keywords, time=None):
        """ Return the modification time and ETag for the content object,
            view method, and keywords as the tuple (modification_time, etag,
            set_last_modified_header), where modification_time is a DateTime,
            or None.

            Only policies with 304 support enabled are considered; the first
            one whose predicate matches wins.  Returns None if no policy
            matches.
        """
        context = createCPContext(content, view_method, keywords, time=time)
        for _policy_id, policy in self.listPolicies():
            if policy.getEnable304s() and policy.testPredicate(context):
                last_modified = policy._mtime_func(context)
                if isinstance(last_modified, str):
                    # The mtime expression may yield a date string.
                    last_modified = DateTime(last_modified)
                content_etag = None
                if policy.getETagFunc():
                    content_etag = policy._etag_func(context)
                return (last_modified, content_etag, policy.getLastModified())
        return None
#
# OFS.CacheManager API
#
@security.private
def ZCacheManager_getCache(self):
""" Retrieve a cache object
"""
cache = getattr(self, '_cache', None)
if cache is None:
self._cache = CPMCache()
cache = self._cache
return cache
# Wire up Zope security declarations and register the tool id with its
# interface so the tool can be looked up by id.
InitializeClass(CachingPolicyManager)
registerToolInterface('caching_policy_manager', ICachingPolicyManager)
def handleCachingPolicyManagerEvent(ob, event):
    """ Event subscriber for (un)registering a CPM as CacheManager
    """
    if not ICachingPolicyManager.providedBy(ob):
        return
    if IObjectMovedEvent.providedBy(event):
        # Object was added/moved: register its id with the new parent.
        parent = event.newParent
        if parent is None:
            return
        ids = getVerifiedManagerIds(parent)
        manager_id = ob.getId()
        if manager_id not in ids:
            setattr(parent, ZCM_MANAGERS, ids + (manager_id,))
    elif IObjectWillBeMovedEvent.providedBy(event):
        # Object is about to be removed/moved: unregister from old parent.
        parent = event.oldParent
        if parent is None:
            return
        ids = list(getVerifiedManagerIds(parent))
        manager_id = ob.getId()
        if manager_id in ids:
            ids.remove(manager_id)
            setattr(parent, ZCM_MANAGERS, tuple(ids))
def manage_addCachingPolicyManager(self, REQUEST=None):
    """
    Add a CPM to self.
    """
    # Use a local name that does not shadow the builtin 'id'.
    mgr_id = CachingPolicyManager.id
    self._setObject(mgr_id, CachingPolicyManager())
    if REQUEST is not None:
        pth = '/manage_main?manage_tabs_message=Caching+Policy+Manager+added.'
        REQUEST['RESPONSE'].redirect('%s%s' % (self.absolute_url(), pth))
|
[
"zope.container.interfaces.IObjectMovedEvent.providedBy",
"Persistence.PersistentMapping",
"zope.interface.implementer",
"zope.component.getUtility",
"OFS.Cache.getVerifiedManagerIds",
"App.special_dtml.DTMLFile",
"OFS.interfaces.IObjectWillBeMovedEvent.providedBy",
"DateTime.DateTime.DateTime",
"AccessControl.SecurityInfo.ClassSecurityInfo",
"AccessControl.class_init.InitializeClass",
"Products.PageTemplates.Expressions.getEngine"
] |
[((4039, 4064), 'AccessControl.class_init.InitializeClass', 'InitializeClass', (['CPMCache'], {}), '(CPMCache)\n', (4054, 4064), False, 'from AccessControl.class_init import InitializeClass\n'), ((4068, 4095), 'zope.interface.implementer', 'implementer', (['ICachingPolicy'], {}), '(ICachingPolicy)\n', (4079, 4095), False, 'from zope.interface import implementer\n'), ((14726, 14760), 'zope.interface.implementer', 'implementer', (['ICachingPolicyManager'], {}), '(ICachingPolicyManager)\n', (14737, 14760), False, 'from zope.interface import implementer\n'), ((30092, 30129), 'AccessControl.class_init.InitializeClass', 'InitializeClass', (['CachingPolicyManager'], {}), '(CachingPolicyManager)\n', (30107, 30129), False, 'from AccessControl.class_init import InitializeClass\n'), ((2289, 2316), 'zope.component.getUtility', 'getUtility', (['IMembershipTool'], {}), '(IMembershipTool)\n', (2299, 2316), False, 'from zope.component import getUtility\n'), ((3032, 3051), 'AccessControl.SecurityInfo.ClassSecurityInfo', 'ClassSecurityInfo', ([], {}), '()\n', (3049, 3051), False, 'from AccessControl.SecurityInfo import ClassSecurityInfo\n'), ((15110, 15129), 'AccessControl.SecurityInfo.ClassSecurityInfo', 'ClassSecurityInfo', ([], {}), '()\n', (15127, 15129), False, 'from AccessControl.SecurityInfo import ClassSecurityInfo\n'), ((15624, 15661), 'App.special_dtml.DTMLFile', 'DTMLFile', (['"""cachingPolicies"""', '_dtmldir'], {}), "('cachingPolicies', _dtmldir)\n", (15632, 15661), False, 'from App.special_dtml import DTMLFile\n'), ((30398, 30433), 'zope.container.interfaces.IObjectMovedEvent.providedBy', 'IObjectMovedEvent.providedBy', (['event'], {}), '(event)\n', (30426, 30433), False, 'from zope.container.interfaces import IObjectMovedEvent\n'), ((2466, 2476), 'DateTime.DateTime.DateTime', 'DateTime', ([], {}), '()\n', (2474, 2476), False, 'from DateTime.DateTime import DateTime\n'), ((15210, 15229), 'Persistence.PersistentMapping', 'PersistentMapping', ([], {}), '()\n', (15227, 
15229), False, 'from Persistence import PersistentMapping\n'), ((30668, 30709), 'OFS.interfaces.IObjectWillBeMovedEvent.providedBy', 'IObjectWillBeMovedEvent.providedBy', (['event'], {}), '(event)\n', (30702, 30709), False, 'from OFS.interfaces import IObjectWillBeMovedEvent\n'), ((2915, 2926), 'Products.PageTemplates.Expressions.getEngine', 'getEngine', ([], {}), '()\n', (2924, 2926), False, 'from Products.PageTemplates.Expressions import getEngine\n'), ((30493, 30531), 'OFS.Cache.getVerifiedManagerIds', 'getVerifiedManagerIds', (['event.newParent'], {}), '(event.newParent)\n', (30514, 30531), False, 'from OFS.Cache import getVerifiedManagerIds\n'), ((12758, 12773), 'DateTime.DateTime.DateTime', 'DateTime', (['mtime'], {}), '(mtime)\n', (12766, 12773), False, 'from DateTime.DateTime import DateTime\n'), ((29513, 29536), 'DateTime.DateTime.DateTime', 'DateTime', (['last_modified'], {}), '(last_modified)\n', (29521, 29536), False, 'from DateTime.DateTime import DateTime\n'), ((30774, 30812), 'OFS.Cache.getVerifiedManagerIds', 'getVerifiedManagerIds', (['event.oldParent'], {}), '(event.oldParent)\n', (30795, 30812), False, 'from OFS.Cache import getVerifiedManagerIds\n')]
|
import os
import json
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
# t_stat, p_val = stats.ttest_ind(sample1, sample2, equal_var=False)
# Directory containing the JSON result files analyzed below.
test_result_dir = "utils/testresults"
# NOTE(review): populated nowhere in this file — appears unused.
all_results = {}
# Aggregate operation names accepted by Vega-Lite encodings.
aggregate_terms = [
    "count", "valid", "missing", "distinct", "sum", "mean", "average",
    "variance", "variancep", "stdev", "stdevp", "stderr", "median", "q1", "q3",
    "ci0", "ci1", "min", "max", "argmin", "argmax"
]
# Result files to analyze, relative to test_result_dir.
file_paths = [
    "/vizmodeluninat5.json", "/vizmodeluninat10.json",
    "/vizmodeluninat15.json", "/vizmodeluninat20.json", "/vizmodeluni5.json",
    "/vizmodeluni10.json", "/vizmodeluni15.json", "/vizmodeluni20.json",
    "/vizmodelbi5.json", "/vizmodelbi10.json", "/vizmodelbi15.json",
    "/vizmodelbi20.json"
]
def analyze_test_suite(test_dataset_directory):
    """Run analyze_data over every configured result file.

    NOTE(review): the parameter is unused — the function iterates the
    module-level ``file_paths`` under ``test_result_dir`` instead.
    """
    # for subdir, dirs, files in os.walk(test_dataset_directory):
    #     for file in files:
    #         filepath = subdir + os.sep + file
    #         if filepath.endswith(
    #                 "json") and not filepath.endswith("lsit.json"):
    for filepath in file_paths:
        filepath = test_result_dir + filepath
        # data = json.load(open(filepath))
        # print(filepath)
        analyze_data(filepath)
def is_valid_aggregate(agg_val):
    """Return True if *agg_val* is a recognized Vega-Lite aggregate term.

    Membership is checked against the module-level ``aggregate_terms`` list.
    """
    # Direct membership test replaces the previous if/else-with-booleans.
    return agg_val in aggregate_terms
def computer_anova():
    """Placeholder for an ANOVA computation; currently only prints a marker."""
    print("anova")
def analyze_data(filepath):
    """Summarize one beam-search result file: prints the mean fraction of
    valid JSON specs, valid Vega specs, and phantom specs per beam.
    """
    data = json.load(open(filepath))
    beam_width = data["beamwidth"]
    valid_json_array = []
    valid_vega_array = []
    phantom_count_array = []
    # NOTE(review): 'x' is only used by the commented-out plotting code.
    x = list(range(0, 100))
    for row in data["data"]:
        # Fraction of the beam that parsed as valid JSON.
        valid_json_count = row["validjsoncount"] / beam_width
        valid_json_array.append(valid_json_count)
        valid_vega_count = row["validvegacount"]
        vs_array = row["vegaspecarray"]
        # mark specs with incorrect aggregation value as invalid vega
        for vs_row in vs_array:
            if ("aggregate" in vs_row["encoding"]["y"]):
                if not is_valid_aggregate(
                        vs_row["encoding"]["y"]["aggregate"]):
                    valid_vega_count -= 1
            else:
                if ("aggregate" in vs_row["encoding"]["x"]):
                    if not is_valid_aggregate(
                            vs_row["encoding"]["x"]["aggregate"]):
                        valid_vega_count -= 1
        # print(valid_vega_count, row["validjsoncount"])
        # Keep the raw count before normalizing: phantoms are expressed as a
        # fraction of the *valid* Vega specs, not of the beam width.
        valid_vegap_count = valid_vega_count
        valid_vega_count = valid_vega_count / beam_width
        valid_vega_array.append(valid_vega_count)
        if (valid_vega_count == 0):
            phantom_count = 0
        else:
            phantom_count = row["phantomcount"] / valid_vegap_count
        phantom_count_array.append(phantom_count)
        # print("Count", row["phantomcount"], valid_vegap_count)
    # print(x, valid_json_array)
    # plt.plot(x, valid_json_array)
    # plt.plot(x, valid_vega_array)
    # plt.plot(x, phantom_count_array)
    # plt.show()
    print(
        filepath.split("vizmodel")[1], "Json:",
        round(np.mean(valid_json_array), 3), "Vega",
        round(np.mean(valid_vega_array), 3), "Mean % Phantom",
        round(np.mean(phantom_count_array), 3))
    # NOTE(review): 'result' is built but never returned or stored.
    result = {"json:": valid_json_array, "vega": valid_vega_array}
# Kick off the full analysis across all configured result files on import.
analyze_test_suite(test_result_dir)
# data = json.load(open("utils/testresults/vizmodelbi15.json"))
# print(len(data["data"]))
# analyze_data("utils/testresults/vizmodeluninat15.json")
|
[
"numpy.mean"
] |
[((3168, 3193), 'numpy.mean', 'np.mean', (['valid_json_array'], {}), '(valid_json_array)\n', (3175, 3193), True, 'import numpy as np\n'), ((3221, 3246), 'numpy.mean', 'np.mean', (['valid_vega_array'], {}), '(valid_vega_array)\n', (3228, 3246), True, 'import numpy as np\n'), ((3284, 3312), 'numpy.mean', 'np.mean', (['phantom_count_array'], {}), '(phantom_count_array)\n', (3291, 3312), True, 'import numpy as np\n')]
|
# file: app.py
import asyncio
import asyncpg
from pgorm.postgresql import PostgreSQL
from myapp.tables import User
async def run():
    """Demo: connect to Postgres, create the User table, insert one row."""
    # NOTE(review): credentials are hard-coded placeholders — do not ship.
    connection = await asyncpg.connect(
        user='reckonsys', password='<PASSWORD>',
        database='demo', host='db.reckonsys.com')
    pg = PostgreSQL(connection)
    # Create the uuid-ossp extension (required by UUIDField primary keys).
    await pg.create_extension_uuid_ossp()
    await pg.create(User)  # Create Table
    user = await pg.insert(User(name='dhilipsiva', age=30))
    print(user)
    # User(pk=UUID('f46863...'), name='dhilipsiva', age=30)
    await connection.close()

# Drive the coroutine to completion when the script is run.
loop = asyncio.get_event_loop()
loop.run_until_complete(run())
|
[
"myapp.tables.User",
"pgorm.postgresql.PostgreSQL",
"asyncpg.connect",
"asyncio.get_event_loop"
] |
[((621, 645), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (643, 645), False, 'import asyncio\n'), ((283, 305), 'pgorm.postgresql.PostgreSQL', 'PostgreSQL', (['connection'], {}), '(connection)\n', (293, 305), False, 'from pgorm.postgresql import PostgreSQL\n'), ((158, 260), 'asyncpg.connect', 'asyncpg.connect', ([], {'user': '"""reckonsys"""', 'password': '"""<PASSWORD>"""', 'database': '"""demo"""', 'host': '"""db.reckonsys.com"""'}), "(user='reckonsys', password='<PASSWORD>', database='demo',\n host='db.reckonsys.com')\n", (173, 260), False, 'import asyncpg\n'), ((475, 506), 'myapp.tables.User', 'User', ([], {'name': '"""dhilipsiva"""', 'age': '(30)'}), "(name='dhilipsiva', age=30)\n", (479, 506), False, 'from myapp.tables import User\n')]
|
import os
from pathlib import Path
from appdirs import user_cache_dir
from ._file import make_sure_dir_exist
# Optional user-supplied metadata directory (set via custom_meta_path);
# None means "use BGEN_READER_CACHE_HOME".
CUSTOM_CACHE = None

# Cache root: honors the BGEN_READER_CACHE_HOME env var, falling back to
# the platform-specific user cache directory for "bgen-reader".
BGEN_READER_CACHE_HOME = Path(
    os.environ.get(
        "BGEN_READER_CACHE_HOME",
        default=Path(user_cache_dir("bgen-reader", "limix")) / "bgen-reader",
    )
)
def custom_meta_path(custom_path: Path = None):
    """Let the end user override the default metadata storage location.

    Potentially useful when working on a shared cluster where the default
    per-user cache directory is subject to permission issues.

    :param custom_path: Path to a directory in which to store metadata
    """
    global CUSTOM_CACHE
    CUSTOM_CACHE = custom_path
__all__ = ["BGEN_READER_CACHE_HOME", "custom_meta_path", "CUSTOM_CACHE"]
make_sure_dir_exist(BGEN_READER_CACHE_HOME)
make_sure_dir_exist(BGEN_READER_CACHE_HOME / "test_data")
make_sure_dir_exist(BGEN_READER_CACHE_HOME / "metafile")
|
[
"appdirs.user_cache_dir"
] |
[((239, 277), 'appdirs.user_cache_dir', 'user_cache_dir', (['"""bgen-reader"""', '"""limix"""'], {}), "('bgen-reader', 'limix')\n", (253, 277), False, 'from appdirs import user_cache_dir\n')]
|
"""`trash` lives on `GitHub <http://github.com/halst/trash/>`_."""
from distutils.core import setup
setup(name='trash',
version='0.1.0',
description='Safe `rm` substitute for OS X',
long_description=__doc__,
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/halst/trash/',
classifiers=['Intended Audience :: Developers',
'Environment :: Console',
'Programming Language :: Python :: 2',
'Operating System :: MacOS',
'License :: OSI Approved :: MIT License'],
keywords='rm, rmtrash, trash',
py_modules=['trash'],
scripts=['trash'])
|
[
"distutils.core.setup"
] |
[((102, 594), 'distutils.core.setup', 'setup', ([], {'name': '"""trash"""', 'version': '"""0.1.0"""', 'description': '"""Safe `rm` substitute for OS X"""', 'long_description': '__doc__', 'license': '"""MIT"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/halst/trash/"""', 'classifiers': "['Intended Audience :: Developers', 'Environment :: Console',\n 'Programming Language :: Python :: 2', 'Operating System :: MacOS',\n 'License :: OSI Approved :: MIT License']", 'keywords': '"""rm, rmtrash, trash"""', 'py_modules': "['trash']", 'scripts': "['trash']"}), "(name='trash', version='0.1.0', description=\n 'Safe `rm` substitute for OS X', long_description=__doc__, license=\n 'MIT', author='<NAME>', author_email='<EMAIL>', url=\n 'https://github.com/halst/trash/', classifiers=[\n 'Intended Audience :: Developers', 'Environment :: Console',\n 'Programming Language :: Python :: 2', 'Operating System :: MacOS',\n 'License :: OSI Approved :: MIT License'], keywords=\n 'rm, rmtrash, trash', py_modules=['trash'], scripts=['trash'])\n", (107, 594), False, 'from distutils.core import setup\n')]
|
# -*- coding:utf-8 -*-
from copy import deepcopy
from itertools import combinations_with_replacement
import pytest
from anywhere import testsets
# Each test is parametrized over the fixture sets in anywhere.testsets;
# numeric ids make failing cases easy to locate.
@pytest.mark.parametrize(
    'where_obj,string',
    zip(
        testsets.WHERES,
        testsets.STRINGS,
    ),
    ids=[str(i) for i in range(1, len(testsets.WHERES) + 1)],
)
def test_str(where_obj, string):
    # str() of each where object must match its expected rendering.
    assert str(where_obj) == string


@pytest.mark.parametrize(
    'where_obj,representation',
    zip(
        testsets.WHERES,
        testsets.REPRESENTATIONS,
    ),
    ids=[str(i) for i in range(1, len(testsets.WHERES) + 1)],
)
def test_repr(where_obj, representation):
    # repr() of each where object must match its expected representation.
    assert repr(where_obj) == representation


@pytest.mark.parametrize(
    'where_obj1,where_obj2',
    combinations_with_replacement(testsets.WHERES, 2),
    ids=lambda _: '',
)
def test_comparision(where_obj1, where_obj2):
    # Pairwise check: only identical objects compare equal.
    if id(where_obj1) == id(where_obj2):
        # We are comparing the same object, should be equal
        assert where_obj1 == where_obj2
    else:
        assert where_obj1 != where_obj2


@pytest.mark.parametrize(
    'where_obj',
    testsets.WHERES,
    ids=[str(i) for i in range(1, len(testsets.WHERES) + 1)],
)
def test_comparision_deepcopy(where_obj):
    # A deep copy must be a distinct object that still compares equal.
    where_obj2 = deepcopy(where_obj)
    assert id(where_obj) != id(where_obj2)
    assert where_obj == where_obj2
|
[
"itertools.combinations_with_replacement",
"copy.deepcopy"
] |
[((746, 795), 'itertools.combinations_with_replacement', 'combinations_with_replacement', (['testsets.WHERES', '(2)'], {}), '(testsets.WHERES, 2)\n', (775, 795), False, 'from itertools import combinations_with_replacement\n'), ((1247, 1266), 'copy.deepcopy', 'deepcopy', (['where_obj'], {}), '(where_obj)\n', (1255, 1266), False, 'from copy import deepcopy\n')]
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import division
import json
import fnmatch
import os
import shutil
import subprocess
import tempfile
import time
import kudu
from kudu.client import Partitioning
class KuduTestBase(object):
"""
Base test class that will start a configurable number of master and
tablet servers.
"""
BASE_PORT = 37000
NUM_TABLET_SERVERS = 3
@classmethod
def start_cluster(cls):
local_path = tempfile.mkdtemp(dir=os.getenv("TEST_TMPDIR"))
kudu_build = os.getenv("KUDU_BUILD")
if not kudu_build:
kudu_build = os.path.join(os.getenv("KUDU_HOME"), "build", "latest")
bin_path = "{0}/bin".format(kudu_build)
os.makedirs("{0}/master/".format(local_path))
os.makedirs("{0}/master/data".format(local_path))
os.makedirs("{0}/master/logs".format(local_path))
path = [
"{0}/kudu-master".format(bin_path),
"-rpc_server_allow_ephemeral_ports",
"-rpc_bind_addresses=0.0.0.0:0",
"-fs_wal_dir={0}/master/data".format(local_path),
"-fs_data_dirs={0}/master/data".format(local_path),
"-log_dir={0}/master/logs".format(local_path),
"-logtostderr",
"-webserver_port=0",
# Only make one replica so that our tests don't need to worry about
# setting consistency modes.
"-default_num_replicas=1",
"-server_dump_info_path={0}/master/config.json".format(local_path)
]
p = subprocess.Popen(path, shell=False)
fid = open("{0}/master/kudu-master.pid".format(local_path), "w+")
fid.write("{0}".format(p.pid))
fid.close()
# We have to wait for the master to settle before the config file
# appears
config_file = "{0}/master/config.json".format(local_path)
for i in range(30):
if os.path.exists(config_file):
break
time.sleep(0.1 * (i + 1))
else:
raise Exception("Could not find kudu-master config file")
# If the server was started get the bind port from the config dump
master_config = json.load(open("{0}/master/config.json"
.format(local_path), "r"))
# One master bound on local host
master_port = master_config["bound_rpc_addresses"][0]["port"]
for m in range(cls.NUM_TABLET_SERVERS):
os.makedirs("{0}/ts/{1}".format(local_path, m))
os.makedirs("{0}/ts/{1}/logs".format(local_path, m))
path = [
"{0}/kudu-tserver".format(bin_path),
"-rpc_server_allow_ephemeral_ports",
"-rpc_bind_addresses=0.0.0.0:0",
"-tserver_master_addrs=127.0.0.1:{0}".format(master_port),
"-webserver_port=0",
"-log_dir={0}/master/logs".format(local_path),
"-logtostderr",
"-fs_data_dirs={0}/ts/{1}/data".format(local_path, m),
"-fs_wal_dir={0}/ts/{1}/data".format(local_path, m),
]
p = subprocess.Popen(path, shell=False)
tserver_pid = "{0}/ts/{1}/kudu-tserver.pid".format(local_path, m)
fid = open(tserver_pid, "w+")
fid.write("{0}".format(p.pid))
fid.close()
return local_path, master_port
    @classmethod
    def stop_cluster(cls, path):
        """Kill every kudu daemon recorded under `path` and remove its data.

        Walks the parent of `path` looking for *.pid files (written by
        start_cluster), signals each recorded process via `kill`, waits for
        the kill command to finish, then deletes the cluster directory tree.
        """
        for root, dirnames, filenames in os.walk('{0}/..'.format(path)):
            for filename in fnmatch.filter(filenames, '*.pid'):
                # Each pid file holds the process id written at daemon startup.
                with open(os.path.join(root, filename)) as fid:
                    a = fid.read()
                r = subprocess.Popen(["kill", "{0}".format(a)])
                r.wait()
                # Remove the pid file so a re-run does not re-kill stale pids.
                os.remove(os.path.join(root, filename))
        # Second argument True = ignore_errors: best-effort cleanup.
        shutil.rmtree(path, True)
    @classmethod
    def setUpClass(cls):
        """Start a throwaway kudu mini-cluster and (re)create the example table."""
        cls.cluster_path, master_port = cls.start_cluster()
        # Give the freshly spawned daemons a moment before connecting.
        time.sleep(1)
        cls.master_host = '127.0.0.1'
        cls.master_port = master_port
        cls.client = kudu.connect(cls.master_host, cls.master_port)
        cls.schema = cls.example_schema()
        cls.partitioning = cls.example_partitioning()
        cls.ex_table = 'example-table'
        # Drop any leftover table from a previous (aborted) run first.
        if cls.client.table_exists(cls.ex_table):
            cls.client.delete_table(cls.ex_table)
        cls.client.create_table(cls.ex_table, cls.schema, cls.partitioning)
    @classmethod
    def tearDownClass(cls):
        """Tear down the mini-cluster started in setUpClass."""
        cls.stop_cluster(cls.cluster_path)
@classmethod
def example_schema(cls):
builder = kudu.schema_builder()
builder.add_column('key', kudu.int32, nullable=False)
builder.add_column('int_val', kudu.int32)
builder.add_column('string_val', kudu.string)
builder.set_primary_keys(['key'])
return builder.build()
@classmethod
def example_partitioning(cls):
return Partitioning().set_range_partition_columns(['key'])
|
[
"fnmatch.filter",
"subprocess.Popen",
"os.path.join",
"kudu.schema_builder",
"os.path.exists",
"time.sleep",
"kudu.connect",
"shutil.rmtree",
"kudu.client.Partitioning",
"os.getenv"
] |
[((1291, 1314), 'os.getenv', 'os.getenv', (['"""KUDU_BUILD"""'], {}), "('KUDU_BUILD')\n", (1300, 1314), False, 'import os\n'), ((2310, 2345), 'subprocess.Popen', 'subprocess.Popen', (['path'], {'shell': '(False)'}), '(path, shell=False)\n', (2326, 2345), False, 'import subprocess\n'), ((4614, 4639), 'shutil.rmtree', 'shutil.rmtree', (['path', '(True)'], {}), '(path, True)\n', (4627, 4639), False, 'import shutil\n'), ((4751, 4764), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4761, 4764), False, 'import time\n'), ((4864, 4910), 'kudu.connect', 'kudu.connect', (['cls.master_host', 'cls.master_port'], {}), '(cls.master_host, cls.master_port)\n', (4876, 4910), False, 'import kudu\n'), ((5378, 5399), 'kudu.schema_builder', 'kudu.schema_builder', ([], {}), '()\n', (5397, 5399), False, 'import kudu\n'), ((2681, 2708), 'os.path.exists', 'os.path.exists', (['config_file'], {}), '(config_file)\n', (2695, 2708), False, 'import os\n'), ((2744, 2769), 'time.sleep', 'time.sleep', (['(0.1 * (i + 1))'], {}), '(0.1 * (i + 1))\n', (2754, 2769), False, 'import time\n'), ((3899, 3934), 'subprocess.Popen', 'subprocess.Popen', (['path'], {'shell': '(False)'}), '(path, shell=False)\n', (3915, 3934), False, 'import subprocess\n'), ((4314, 4348), 'fnmatch.filter', 'fnmatch.filter', (['filenames', '"""*.pid"""'], {}), "(filenames, '*.pid')\n", (4328, 4348), False, 'import fnmatch\n'), ((1244, 1268), 'os.getenv', 'os.getenv', (['"""TEST_TMPDIR"""'], {}), "('TEST_TMPDIR')\n", (1253, 1268), False, 'import os\n'), ((1380, 1402), 'os.getenv', 'os.getenv', (['"""KUDU_HOME"""'], {}), "('KUDU_HOME')\n", (1389, 1402), False, 'import os\n'), ((5708, 5722), 'kudu.client.Partitioning', 'Partitioning', ([], {}), '()\n', (5720, 5722), False, 'from kudu.client import Partitioning\n'), ((4376, 4404), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (4388, 4404), False, 'import os\n'), ((4576, 4604), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, 
filename)\n', (4588, 4604), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
# Headless helpdesk automation: registers a new customer and files a ticket.
# Invoked as: system('python tickets.py "Sergio" "Fernandez" "<EMAIL>" "950001" "968868968"')
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re
from selenium.webdriver.common.action_chains import ActionChains
import sys
# Arguments are popped off the END of argv, so they are read in reverse
# of the order they were passed on the command line.
TLFN = sys.argv.pop()
CC = sys.argv.pop()
EMAIL = sys.argv.pop()
SURN = sys.argv.pop()
NAME = sys.argv.pop()
driver = webdriver.PhantomJS()
driver.get("http://averias.emartinez.es:8081/helpdesk/WebObjects/Helpdesk.woa/wa")
# Log in (credentials are hard-coded; NOTE(review): move to config/env).
driver.find_element_by_id("userName").send_keys("admin")
driver.find_element_by_id("password").send_keys("PASSWORD")
driver.find_element_by_name("1172.16.31.10.7.4.1.11.0.1.0").click()
#driver.save_screenshot("/home/tecnico/WebApp/data/prueba.png")
# New customer: fill the auto-generated WebObjects form fields.
driver.find_element_by_xpath("//img[@alt='Clientes']").click()
driver.find_element_by_css_selector("div.squareButtonMiddle").click()
driver.find_element_by_name("7.25.0.0.0.2.5.5.1.8.1.3.0.1.3.1.1.0.0.1.1.13.0.0.1").clear()
driver.find_element_by_name("7.25.0.0.0.2.5.5.1.8.1.3.0.1.3.1.1.0.0.1.1.13.0.0.1").send_keys(NAME) # first name
driver.find_element_by_name("7.25.0.0.0.2.5.5.1.8.1.3.0.1.3.1.1.0.0.1.1.17.0.0.1").clear()
driver.find_element_by_name("7.25.0.0.0.2.5.5.1.8.1.3.0.1.3.1.1.0.0.1.1.17.0.0.1").send_keys(SURN) # last name
driver.find_element_by_name("7.25.0.0.0.2.5.5.1.8.1.3.0.1.3.1.1.0.0.1.1.21.1.1.0.0.1").clear()
driver.find_element_by_name("7.25.0.0.0.2.5.5.1.8.1.3.0.1.3.1.1.0.0.1.1.21.1.1.0.0.1").send_keys(EMAIL) # email
driver.find_element_by_name("7.25.0.0.0.2.5.5.1.8.1.3.0.1.3.1.1.0.0.1.1.33.0.0.1").clear()
driver.find_element_by_name("7.25.0.0.0.2.5.5.1.8.1.3.0.1.3.1.1.0.0.1.1.33.0.0.1").send_keys(CC) # customer code
driver.find_element_by_name("7.25.0.0.0.2.5.5.1.8.1.3.0.1.3.1.1.0.0.1.1.41.0.0.1").clear()
driver.find_element_by_name("7.25.0.0.0.2.5.5.1.8.1.3.0.1.3.1.1.0.0.1.1.41.0.0.1").send_keys(TLFN) # phone number
driver.find_element_by_css_selector("div.aquaMiddleSel").click()
# New ticket: open the preferences menu (needs a hover to expand) and
# upload the pre-built CSV of ticket data.
driver.find_element_by_xpath("//img[@alt='Configuración']").click()
element = driver.find_element_by_xpath("//div[@id='preferences-menu']/div/div[23]")
hover = ActionChains(driver).move_to_element(element)
hover.perform()
time.sleep(1)
driver.find_element_by_xpath("//div[@id='preferences-menu']/div/div[24]/ul/li[5]/a/div/div[2]").click()
driver.find_element_by_xpath("//input[@name='Field Separator' and @value='1']").click()
driver.find_element_by_xpath("//input[@type='file']").send_keys("ticketFIN.csv")
driver.find_element_by_css_selector("div.aquaMiddleSel").click()
# Let the upload finish before logging out.
time.sleep(4)
driver.find_element_by_id("logoutLink").click()
driver.close()
driver.quit()
|
[
"selenium.webdriver.PhantomJS",
"selenium.webdriver.common.action_chains.ActionChains",
"sys.argv.pop",
"time.sleep"
] |
[((514, 528), 'sys.argv.pop', 'sys.argv.pop', ([], {}), '()\n', (526, 528), False, 'import sys\n'), ((534, 548), 'sys.argv.pop', 'sys.argv.pop', ([], {}), '()\n', (546, 548), False, 'import sys\n'), ((557, 571), 'sys.argv.pop', 'sys.argv.pop', ([], {}), '()\n', (569, 571), False, 'import sys\n'), ((579, 593), 'sys.argv.pop', 'sys.argv.pop', ([], {}), '()\n', (591, 593), False, 'import sys\n'), ((601, 615), 'sys.argv.pop', 'sys.argv.pop', ([], {}), '()\n', (613, 615), False, 'import sys\n'), ((626, 647), 'selenium.webdriver.PhantomJS', 'webdriver.PhantomJS', ([], {}), '()\n', (645, 647), False, 'from selenium import webdriver\n'), ((2448, 2461), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2458, 2461), False, 'import unittest, time, re\n'), ((2800, 2813), 'time.sleep', 'time.sleep', (['(4)'], {}), '(4)\n', (2810, 2813), False, 'import unittest, time, re\n'), ((2386, 2406), 'selenium.webdriver.common.action_chains.ActionChains', 'ActionChains', (['driver'], {}), '(driver)\n', (2398, 2406), False, 'from selenium.webdriver.common.action_chains import ActionChains\n')]
|
import numpy as np
import torch
from PIL import Image
import torchvision.transforms as T
from infer import Inference
from utils.nms import nms
# Inference-only module: disable autograd globally so no graphs are built.
torch.set_grad_enabled(False)
def class_agnostic_nms(boxes, scores, iou=0.5):
    """Suppress overlapping detections regardless of their class.

    Runs NMS at the given IoU threshold whenever more than one box is
    present; with zero or one box there is nothing to suppress, so the
    inputs are returned unchanged.
    """
    if len(boxes) <= 1:
        return boxes, scores
    kept_boxes, kept_scores = nms(np.array(boxes), np.array(scores), iou)
    return list(kept_boxes), list(kept_scores)
def generate_image_crops(img, num_crops=8):
    """
    Note: num_crops must be greater than 2 and of multiple of 2

    Returns (crops, coordinates, (img_w, img_h)): the full frame plus
    `num_crops` overlapping tiles, each resized back to the full frame
    size, together with each tile's (x1, y1, x2, y2) source coordinates.
    """
    assert num_crops > 2
    assert num_crops % 2 == 0
    img_w, img_h = img.size
    # The untouched full frame is always the first "crop".
    crops = [img]
    coordinates = [(0, 0, img_w, img_h)]
    chunks_x = int(num_crops / 2)
    chunks_y = int(num_crops / chunks_x)
    step_x = int(img_w / chunks_y)
    step_y = int(img_h / chunks_y)
    xs = np.linspace(0, img_w - step_x, chunks_y)
    ys = np.linspace(0, img_h - step_y, int(num_crops / chunks_y))
    if num_crops > 1:
        for x0 in xs:
            for y0 in ys:
                x1, y1 = x0 + step_x, y0 + step_y
                tile = img.crop((x0, y0, x1, y1)).resize((img_w, img_h))
                crops.append(tile)
                coordinates.append((x0, y0, x1, y1))
    return crops, coordinates, (img_w, img_h)
def scale_boxes(boxes, coordinates, img_dims):
    """Map boxes from resized-tile space back into original-image space.

    `coordinates` is the tile's (x1, y1, x2, y2) region in the original
    image and `img_dims` the (width, height) every tile was resized to.
    Boxes are mutated in place (coordinates truncated to int) and the
    same list is returned.
    """
    x_off, y_off, x_end, y_end = coordinates
    full_w, full_h = img_dims
    tile_w = x_end - x_off
    tile_h = y_end - y_off
    for box in boxes:
        box[0] = int((box[0] / full_w) * tile_w) + x_off
        box[1] = int((box[1] / full_h) * tile_h) + y_off
        box[2] = int((box[2] / full_w) * tile_w) + x_off
        box[3] = int((box[3] / full_h) * tile_h) + y_off
    return boxes
class ModulatedDetection(Inference):
    """
    The class supports the inference using both MDETR & MDef-DETR models.

    Given an image path and a text caption, returns grounded detections as
    integer xyxy boxes plus their confidence scores.
    """
    def __init__(self, model, confidence_thresh=0.0):
        """
        :param model: the MDETR/MDef-DETR model to run (expected on CUDA).
        :param confidence_thresh: detections at or below this confidence
            are discarded.
        """
        Inference.__init__(self, model)
        self.conf_thresh = confidence_thresh
        # Shorter side resized to 800 px, then ImageNet mean/std normalisation.
        self.transform = T.Compose([
            T.Resize(800),
            T.ToTensor(),
            T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])

    @staticmethod
    def box_cxcywh_to_xyxy(x):
        """Convert an (N, 4) tensor of (cx, cy, w, h) boxes to (x1, y1, x2, y2)."""
        x_c, y_c, w, h = x.unbind(1)
        b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
             (x_c + 0.5 * w), (y_c + 0.5 * h)]
        return torch.stack(b, dim=1)

    def rescale_bboxes(self, out_bbox, size):
        """Scale normalised [0, 1] cxcywh boxes to absolute xyxy pixel coords."""
        img_w, img_h = size
        b = self.box_cxcywh_to_xyxy(out_bbox)
        b = b * torch.tensor([img_w, img_h, img_w, img_h], dtype=torch.float32)
        return b

    def infer_image(self, image_path, **kwargs):
        """Run grounded detection on one image.

        :param image_path: path of the image file to load.
        :param kwargs: must contain "caption", the grounding text query.
        :return: (boxes, scores) where boxes is a list of [x1, y1, x2, y2]
            int lists and scores the matching confidences.
        """
        caption = kwargs["caption"]
        # Read the image; grayscale/paletted inputs are promoted to RGB.
        im = Image.open(image_path)
        imq = np.array(im)
        if len(imq.shape) != 3:
            im = im.convert('RGB')
        img = self.transform(im).unsqueeze(0).cuda()
        # Propagate through the model (two-phase encode/decode API).
        memory_cache = self.model(img, [caption], encode_and_save=True)
        outputs = self.model(img, [caption], encode_and_save=False, memory_cache=memory_cache)
        # Keep only predictions above self.conf_thresh confidence
        # (last logit column is the no-object class).
        probas = 1 - outputs['pred_logits'].softmax(-1)[0, :, -1].cpu()
        keep = (probas > self.conf_thresh).cpu()
        # Convert boxes from [0, 1] to image scale.
        bboxes_scaled = self.rescale_bboxes(outputs['pred_boxes'].cpu()[0, keep], im.size)
        kept_probs = probas[keep]
        # Convert outputs to the required plain-Python format.
        bboxes = list(bboxes_scaled.numpy())
        probs = list(kept_probs.numpy())
        boxes, scores = [], []
        for b, conf in zip(bboxes, probs):
            boxes.append([int(b[0]), int(b[1]), int(b[2]), int(b[3])])
            scores.append(conf)
        return boxes, scores

    def infer_image_multi_crop(self, image_path, **kwargs):
        """Run grounded detection on the full image plus overlapping crops,
        map each crop's boxes back to full-image coordinates, and merge the
        results with class-agnostic NMS.

        :param image_path: path of the image file to load.
        :param kwargs: must contain "caption", the grounding text query.
        :return: (boxes, scores) after NMS, in full-image coordinates.
        """
        caption = kwargs["caption"]
        # Read the image and cut it into the standard crop grid.
        im = Image.open(image_path)
        crops, coordinates, img_dims = generate_image_crops(im)
        imgs = [self.transform(crop).unsqueeze(0).cuda() for crop in crops]
        imgs = torch.cat(imgs)
        # Propagate the whole batch of crops through the model.
        memory_cache = self.model(imgs, [caption for i in range(imgs.shape[0])], encode_and_save=True)
        outputs = self.model(imgs, [caption], encode_and_save=False, memory_cache=memory_cache)
        all_boxes = []
        all_scores = []
        for i in range(len(crops)):
            # Keep only predictions above self.conf_thresh confidence.
            probas = 1 - outputs['pred_logits'].softmax(-1)[i, :, -1].cpu()
            keep = (probas > self.conf_thresh).cpu()
            # Convert boxes from [0, 1] to (crop-local) image scale.
            bboxes_scaled = self.rescale_bboxes(outputs['pred_boxes'].cpu()[i, keep], im.size)
            kept_probs = probas[keep]
            # Convert outputs to the required plain-Python format.
            bboxes = list(bboxes_scaled.numpy())
            probs = list(kept_probs.numpy())
            boxes, scores = [], []
            for b, conf in zip(bboxes, probs):
                boxes.append([int(b[0]), int(b[1]), int(b[2]), int(b[3])])
                scores.append(conf)
            # Map this crop's boxes back into full-image coordinates.
            boxes = scale_boxes(boxes, coordinates[i], img_dims)
            all_boxes += boxes
            all_scores += scores
        # BUG FIX: class_agnostic_nms returns a (boxes, scores) tuple. The
        # previous code assigned that whole tuple to all_boxes and returned
        # the stale pre-NMS scores; unpack both so boxes and scores match.
        all_boxes, all_scores = class_agnostic_nms(all_boxes, all_scores)
        return all_boxes, all_scores
|
[
"torch.stack",
"torchvision.transforms.Normalize",
"torch.cat",
"PIL.Image.open",
"torchvision.transforms.ToTensor",
"numpy.array",
"numpy.linspace",
"torch.set_grad_enabled",
"torch.tensor",
"infer.Inference.__init__",
"torchvision.transforms.Resize"
] |
[((144, 173), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (166, 173), False, 'import torch\n'), ((926, 970), 'numpy.linspace', 'np.linspace', (['(0)', '(img_w - x_inc)', 'crop_chunks_y'], {}), '(0, img_w - x_inc, crop_chunks_y)\n', (937, 970), True, 'import numpy as np\n'), ((1941, 1972), 'infer.Inference.__init__', 'Inference.__init__', (['self', 'model'], {}), '(self, model)\n', (1959, 1972), False, 'from infer import Inference\n'), ((2385, 2406), 'torch.stack', 'torch.stack', (['b'], {'dim': '(1)'}), '(b, dim=1)\n', (2396, 2406), False, 'import torch\n'), ((2749, 2771), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (2759, 2771), False, 'from PIL import Image\n'), ((2786, 2798), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (2794, 2798), True, 'import numpy as np\n'), ((4062, 4084), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (4072, 4084), False, 'from PIL import Image\n'), ((4240, 4255), 'torch.cat', 'torch.cat', (['imgs'], {}), '(imgs)\n', (4249, 4255), False, 'import torch\n'), ((275, 290), 'numpy.array', 'np.array', (['boxes'], {}), '(boxes)\n', (283, 290), True, 'import numpy as np\n'), ((292, 308), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (300, 308), True, 'import numpy as np\n'), ((2544, 2607), 'torch.tensor', 'torch.tensor', (['[img_w, img_h, img_w, img_h]'], {'dtype': 'torch.float32'}), '([img_w, img_h, img_w, img_h], dtype=torch.float32)\n', (2556, 2607), False, 'import torch\n'), ((2067, 2080), 'torchvision.transforms.Resize', 'T.Resize', (['(800)'], {}), '(800)\n', (2075, 2080), True, 'import torchvision.transforms as T\n'), ((2094, 2106), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (2104, 2106), True, 'import torchvision.transforms as T\n'), ((2120, 2177), 'torchvision.transforms.Normalize', 'T.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 
0.225])\n', (2131, 2177), True, 'import torchvision.transforms as T\n')]
|
from pytube import YouTube
import re
import os
#Action
class AuxFunctions():
    """Helper routines for the YouTube downloader: resolution discovery,
    filename sanitising, temp-file cleanup and stream metadata extraction.

    All methods follow the same best-effort contract: on any failure they
    print 'Error in <method>' and return None instead of raising.
    """

    def get_resolutions(self, url):
        """Return the sorted-by-declaration list of resolutions (e.g. '720p')
        available for the video at `url`, or None when it cannot be inspected."""
        res_exist = {'144p': False, '240p': False, '360p': False, '480p': False, '720p': False, '1080p': False, '1440p': False, '2160p': False, '4320p': False}
        resolutions_list = list()
        function_name = self.get_resolutions.__name__
        try:
            video = YouTube(url)
            # Mark every known resolution that at least one stream provides.
            for stream in video.streams.all():
                res_string = str(stream.resolution)
                if res_string in res_exist:
                    res_exist[res_string] = True
            resolutions_list = [res for res in res_exist if res_exist[res]]
            print('Success in ' + function_name)
        except Exception:  # narrowed from bare except; still best-effort
            print('Error in ' + function_name)
            return None
        return resolutions_list

    def removeEmoji(self, text):
        """Strip emoji and filesystem-unsafe characters from `text` and
        collapse runs of spaces; returns the cleaned string or None on error."""
        name_function = self.removeEmoji.__name__
        try:
            emoji_pattern = re.compile("["
                                       u"\U0001F600-\U0001F64F"  # emoticons
                                       u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                                       u"\U0001F680-\U0001F6FF"  # transport & map symbols
                                       u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                                       "]+", flags=re.UNICODE)
            text_format = emoji_pattern.sub(r'', text)
            text_format = re.sub(' +', ' ', text_format)
            # One C-level pass deleting the unsafe characters instead of nine
            # chained str.replace calls (same characters, same result).
            text_format = text_format.translate(str.maketrans('', '', '"\\/:*?<>|'))
            print('Success in ' + name_function)
        except Exception:
            print('Error in ' + name_function)
            return None
        return text_format

    def removeFiles(self, video_title):
        """Delete the intermediate download artefacts for `video_title`;
        missing files are silently skipped."""
        path_video = './download/mp4/' + video_title + '.mp4'
        path_audio = './download/mp3/' + video_title
        path_last_file = './download/' + video_title + '.mp4'
        function_name = self.removeFiles.__name__
        try:
            for path in (path_video, path_audio + '.mp3', path_audio + '.mp4', path_last_file):
                if os.path.exists(path):
                    os.remove(path)
            print('Success in ' + function_name)
        except Exception:
            print('Error in ' + function_name)

    def information_Audio(self, stream, title):
        """Return a dict describing an audio stream (name, bitrate, codec,
        format) or None on failure."""
        info_Audio = {'Nome': '', 'Bitrate': '', 'Codec': '', 'Formato': ''}
        function_name = self.information_Audio.__name__
        try:
            info_Audio['Nome'] = title
            info_Audio['Bitrate'] = stream.abr
            info_Audio['Codec'] = stream.codecs[1]
            info_Audio['Formato'] = 'mp3'
            print('Success in ' + function_name)
        except Exception:
            print('Error in ' + function_name)
            return None
        return info_Audio

    def information_Video(self, stream, title):
        """Return a dict describing a video stream (name, resolution, fps,
        video/audio codecs, format) or None on failure."""
        function_name = self.information_Video.__name__
        info_Video = {'Nome': '', 'Resolução': '', 'Fps': '', 'VCodec': '', 'ACodec': '', 'Formato': ''}
        try:
            info_Video['Nome'] = stream.title
            info_Video['Resolução'] = stream.resolution
            info_Video['Fps'] = stream.fps
            info_Video['VCodec'] = stream.codecs[0]
            info_Video['ACodec'] = stream.codecs[1]
            info_Video['Formato'] = 'mp4'
            # Fixed 'Sucess' typo for consistency with the other methods.
            print('Success in ' + function_name)
        except Exception:
            print('Error in ' + function_name)
            return None
        return info_Video
|
[
"os.remove",
"pytube.YouTube",
"os.path.exists",
"re.sub",
"re.compile"
] |
[((396, 408), 'pytube.YouTube', 'YouTube', (['url'], {}), '(url)\n', (403, 408), False, 'from pytube import YouTube\n'), ((1099, 1164), 're.compile', 're.compile', (['"""[😀-🙏🌀-🗿🚀-\U0001f6ff\U0001f1e0-🇿]+"""'], {'flags': 're.UNICODE'}), "('[😀-🙏🌀-🗿🚀-\\U0001f6ff\\U0001f1e0-🇿]+', flags=re.UNICODE)\n", (1109, 1164), False, 'import re\n'), ((1476, 1506), 're.sub', 're.sub', (['""" +"""', '""" """', 'text_format'], {}), "(' +', ' ', text_format)\n", (1482, 1506), False, 'import re\n'), ((2486, 2512), 'os.path.exists', 'os.path.exists', (['path_video'], {}), '(path_video)\n', (2500, 2512), False, 'import os\n'), ((2580, 2615), 'os.path.exists', 'os.path.exists', (["(path_audio + '.mp3')"], {}), "(path_audio + '.mp3')\n", (2594, 2615), False, 'import os\n'), ((2692, 2727), 'os.path.exists', 'os.path.exists', (["(path_audio + '.mp4')"], {}), "(path_audio + '.mp4')\n", (2706, 2727), False, 'import os\n'), ((2804, 2834), 'os.path.exists', 'os.path.exists', (['path_last_file'], {}), '(path_last_file)\n', (2818, 2834), False, 'import os\n'), ((2530, 2551), 'os.remove', 'os.remove', (['path_video'], {}), '(path_video)\n', (2539, 2551), False, 'import os\n'), ((2633, 2663), 'os.remove', 'os.remove', (["(path_audio + '.mp3')"], {}), "(path_audio + '.mp3')\n", (2642, 2663), False, 'import os\n'), ((2745, 2775), 'os.remove', 'os.remove', (["(path_audio + '.mp4')"], {}), "(path_audio + '.mp4')\n", (2754, 2775), False, 'import os\n'), ((2852, 2877), 'os.remove', 'os.remove', (['path_last_file'], {}), '(path_last_file)\n', (2861, 2877), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
import hashlib
import logging
import os
import tempfile
import time
import cv2
import numpy as np
WIDTH_HEIGHT_LIMIT = 1600 # in pixel
def resize_large_image(image_data):
    """Decode raw image bytes and downscale so neither side exceeds
    WIDTH_HEIGHT_LIMIT pixels, preserving the aspect ratio.

    :param image_data: encoded image bytes (e.g. JPEG/PNG file contents).
    :return: the decoded (and possibly resized) OpenCV image array.
    """
    # np.fromstring is deprecated for binary input; np.frombuffer is the
    # documented replacement with identical behaviour for raw bytes.
    img_array = np.frombuffer(image_data, dtype=np.uint8)
    image = cv2.imdecode(img_array, 1)
    height, width = image.shape[:2]
    logging.info("Height: {}, Width: {}".format(height, width))
    if height > width and height > WIDTH_HEIGHT_LIMIT:
        # Portrait: clamp the height; +0.5 rounds the scaled width.
        ratio = float(WIDTH_HEIGHT_LIMIT) / float(height)
        new_width = int((width * ratio) + 0.5)
        return cv2.resize(
            image,
            (new_width, WIDTH_HEIGHT_LIMIT),
            interpolation=cv2.INTER_AREA
        )
    elif width > WIDTH_HEIGHT_LIMIT:
        # Landscape (or square): clamp the width instead.
        ratio = float(WIDTH_HEIGHT_LIMIT) / float(width)
        new_height = int((height * ratio) + 0.5)
        return cv2.resize(
            image,
            (WIDTH_HEIGHT_LIMIT, new_height),
            interpolation=cv2.INTER_AREA
        )
    else:
        # Already within limits; return unchanged.
        return image
def resize_faces(image_files, width=96, height=96):
    """Resize every face image in `image_files` to width x height,
    overwriting each file in place."""
    target_size = (width, height)
    for path in image_files:
        face = cv2.imread(path)
        shrunk = cv2.resize(face, target_size, interpolation=cv2.INTER_AREA)
        cv2.imwrite(path, shrunk)
def cleanup_image_cache(image_dir, expire=3600):  # Expire in 1 hour
    """Delete plain files in `image_dir` whose mtime is older than
    `expire` seconds; newer files and subdirectories are left alone."""
    cutoff = time.time() - expire
    for name in os.listdir(image_dir):
        path = os.path.join(image_dir, name)
        if os.stat(path).st_mtime < cutoff and os.path.isfile(path):
            os.remove(path)
def sha256_checksum(filename, block_size=65536):
    """Return the hex SHA-256 digest of the file at `filename`,
    reading it in `block_size`-byte chunks to bound memory use."""
    digest = hashlib.sha256()
    with open(filename, 'rb') as fh:
        while True:
            chunk = fh.read(block_size)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def get_hex_value(r, g, b):
    """Return the '#rrggbb' hex string for an RGB triple, clamping each
    channel into the 0..255 range first."""
    clamped = (min(max(channel, 0), 255) for channel in (r, g, b))
    return "#" + "".join("{:02x}".format(channel) for channel in clamped)
def get_resized_face_temp_file(face_dict, cv2_img):
    """Crop the face region out of `cv2_img`, resize it to 96x96 and write
    it to a temporary .jpg file; returns the temp file's path.

    NOTE(review): assumes face_dict['pos'] is a rect-like object with
    x/y/width/height attributes -- confirm against the detector that
    builds it. The file is created with delete=False, so the caller is
    responsible for removing it (see cleanup_image_cache).
    """
    width, height = 96, 96
    pos = face_dict['pos']
    # OpenCV arrays index rows (y) first, then columns (x).
    crop_img = cv2_img[pos.y:pos.y+pos.height, pos.x:pos.x+pos.width]
    resized_img = cv2.resize(
        crop_img,
        (width, height),
        interpolation=cv2.INTER_AREA
    )
    resized_path = None
    with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as temp_ff:
        resized_path = temp_ff.name
        cv2.imwrite(temp_ff.name, resized_img)
    return resized_path
|
[
"tempfile.NamedTemporaryFile",
"os.remove",
"os.stat",
"cv2.imwrite",
"cv2.imdecode",
"time.time",
"hashlib.sha256",
"cv2.imread",
"numpy.fromstring",
"os.path.isfile",
"os.path.join",
"os.listdir",
"cv2.resize"
] |
[((216, 257), 'numpy.fromstring', 'np.fromstring', (['image_data'], {'dtype': 'np.uint8'}), '(image_data, dtype=np.uint8)\n', (229, 257), True, 'import numpy as np\n'), ((270, 296), 'cv2.imdecode', 'cv2.imdecode', (['img_array', '(1)'], {}), '(img_array, 1)\n', (282, 296), False, 'import cv2\n'), ((1407, 1418), 'time.time', 'time.time', ([], {}), '()\n', (1416, 1418), False, 'import time\n'), ((1432, 1453), 'os.listdir', 'os.listdir', (['image_dir'], {}), '(image_dir)\n', (1442, 1453), False, 'import os\n'), ((1668, 1684), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (1682, 1684), False, 'import hashlib\n'), ((2197, 2264), 'cv2.resize', 'cv2.resize', (['crop_img', '(width, height)'], {'interpolation': 'cv2.INTER_AREA'}), '(crop_img, (width, height), interpolation=cv2.INTER_AREA)\n', (2207, 2264), False, 'import cv2\n'), ((572, 657), 'cv2.resize', 'cv2.resize', (['image', '(new_width, WIDTH_HEIGHT_LIMIT)'], {'interpolation': 'cv2.INTER_AREA'}), '(image, (new_width, WIDTH_HEIGHT_LIMIT), interpolation=cv2.INTER_AREA\n )\n', (582, 657), False, 'import cv2\n'), ((1121, 1143), 'cv2.imread', 'cv2.imread', (['image_file'], {}), '(image_file)\n', (1131, 1143), False, 'import cv2\n'), ((1168, 1232), 'cv2.resize', 'cv2.resize', (['image', '(width, height)'], {'interpolation': 'cv2.INTER_AREA'}), '(image, (width, height), interpolation=cv2.INTER_AREA)\n', (1178, 1232), False, 'import cv2\n'), ((1287, 1325), 'cv2.imwrite', 'cv2.imwrite', (['image_file', 'resized_image'], {}), '(image_file, resized_image)\n', (1298, 1325), False, 'import cv2\n'), ((1467, 1493), 'os.path.join', 'os.path.join', (['image_dir', 'f'], {}), '(image_dir, f)\n', (1479, 1493), False, 'import os\n'), ((2328, 2384), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)', 'suffix': '""".jpg"""'}), "(delete=False, suffix='.jpg')\n", (2355, 2384), False, 'import tempfile\n'), ((2441, 2479), 'cv2.imwrite', 'cv2.imwrite', (['temp_ff.name', 'resized_img'], {}), 
'(temp_ff.name, resized_img)\n', (2452, 2479), False, 'import cv2\n'), ((857, 943), 'cv2.resize', 'cv2.resize', (['image', '(WIDTH_HEIGHT_LIMIT, new_height)'], {'interpolation': 'cv2.INTER_AREA'}), '(image, (WIDTH_HEIGHT_LIMIT, new_height), interpolation=cv2.\n INTER_AREA)\n', (867, 943), False, 'import cv2\n'), ((1556, 1573), 'os.path.isfile', 'os.path.isfile', (['f'], {}), '(f)\n', (1570, 1573), False, 'import os\n'), ((1505, 1515), 'os.stat', 'os.stat', (['f'], {}), '(f)\n', (1512, 1515), False, 'import os\n'), ((1591, 1603), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (1600, 1603), False, 'import os\n')]
|
import json
def load_json(file_name):
    """Read the JSON document stored at `file_name` and return the parsed object."""
    with open(file_name) as fh:
        return json.load(fh)
|
[
"json.load"
] |
[((92, 112), 'json.load', 'json.load', (['json_data'], {}), '(json_data)\n', (101, 112), False, 'import json\n')]
|
import importlib
from hydroDL.master import basins
from hydroDL.app import waterQuality, wqLinear
from hydroDL import kPath
from hydroDL.model import trainTS
from hydroDL.data import gageII, usgs
from hydroDL.post import axplot, figplot
import torch
import os
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# test
# Evaluate the trained 'Silica64' LSTM for silica (code 00955, opt1) over
# the training period (Y8090) and the test period (Y0010), then show an
# interactive site map with per-site time-series plots.
outName = 'Silica64-Y8090-00955-opt1'
wqData = waterQuality.DataModelWQ('Silica64')
code = '00955'
trainset = 'Y8090'
testset = 'Y0010'
# trainset = 'Y0010'
# testset = 'Y8090'
optT = trainset
master = basins.loadMaster(outName)
# seq test
# Run the sequence test for every site in the data model (writes outputs
# that loadSeq reads back below).
siteNoLst = wqData.info['siteNo'].unique().tolist()
basins.testModelSeq(outName, siteNoLst, wqData=wqData)
ns = len(siteNoLst)
# calculate error from sequence
# Column 0 holds the first-period metric, column 1 the second-period metric.
rmseMat = np.ndarray([ns, 2])
corrMat = np.ndarray([ns, 2])
for k, siteNo in enumerate(siteNoLst):
    print(k, siteNo)
    dfPred, dfObs = basins.loadSeq(outName, siteNo)
    rmseLSTM, corrLSTM = waterQuality.calErrSeq(dfPred[code], dfObs[code])
    rmseMat[k, :] = rmseLSTM
    corrMat[k, :] = corrLSTM
# time series map
# Site coordinates for the interactive map.
dfCrd = gageII.readData(
    varLst=['LAT_GAGE', 'LNG_GAGE'], siteNoLst=siteNoLst)
lat = dfCrd['LAT_GAGE'].values
lon = dfCrd['LNG_GAGE'].values
codePdf = usgs.codePdf
def funcMap():
    """Draw the two site maps: train-test correlation gap and test correlation."""
    figM, axM = plt.subplots(2, 1, figsize=(8, 6))
    axplot.mapPoint(axM[0], lat, lon, corrMat[:, 0]-corrMat[:, 1], s=12)
    axplot.mapPoint(axM[1], lat, lon, corrMat[:, 1], s=12)
    figP, axP = plt.subplots(1, 1, figsize=(8, 6))
    return figM, axM, figP, axP, lon, lat
def funcPoint(iP, axP):
    """Plot LSTM prediction vs observation for the clicked site (index iP)."""
    siteNo = siteNoLst[iP]
    dfP1, dfObs = basins.loadSeq(outName, siteNo)
    rmse1, corr1 = waterQuality.calErrSeq(dfP1[code], dfObs[code])
    t = dfObs.index.values
    # Vertical marker at the train/test split date.
    tBar = np.datetime64('2000-01-01')
    axplot.plotTS(axP, t, [dfP1[code], dfObs[code]], tBar=tBar,
                  legLst=['LSTM', 'obs'], styLst='-*', cLst='br')
    tStr = '{}, rmse [{:.2f} {:.2f}], corr [{:.2f} {:.2f}]'.format(
        siteNo, rmse1[0], rmse1[1], corr1[0], corr1[1])
    axP.set_title(tStr)
importlib.reload(figplot)
figM, figP = figplot.clickMap(funcMap, funcPoint)
# Step the point plot through successive zoom windows, redrawing each time.
for ax in figP.axes:
    ax.set_xlim(np.datetime64('2010-01-01'), np.datetime64('2015-01-01'))
figP.canvas.draw()
for ax in figP.axes:
    ax.set_xlim(np.datetime64('1990-01-01'), np.datetime64('1995-01-01'))
figP.canvas.draw()
for ax in figP.axes:
    ax.set_xlim(np.datetime64('1980-01-01'), np.datetime64('2020-01-01'))
figP.canvas.draw()
for ax in figP.axes:
    ax.set_ylim(5, 30)
figP.canvas.draw()
|
[
"hydroDL.post.axplot.plotTS",
"hydroDL.app.waterQuality.calErrSeq",
"hydroDL.post.figplot.clickMap",
"numpy.datetime64",
"matplotlib.pyplot.subplots",
"hydroDL.app.waterQuality.DataModelWQ",
"hydroDL.master.basins.loadSeq",
"importlib.reload",
"hydroDL.master.basins.loadMaster",
"hydroDL.post.axplot.mapPoint",
"hydroDL.data.gageII.readData",
"numpy.ndarray",
"hydroDL.master.basins.testModelSeq"
] |
[((400, 436), 'hydroDL.app.waterQuality.DataModelWQ', 'waterQuality.DataModelWQ', (['"""Silica64"""'], {}), "('Silica64')\n", (424, 436), False, 'from hydroDL.app import waterQuality, wqLinear\n'), ((555, 581), 'hydroDL.master.basins.loadMaster', 'basins.loadMaster', (['outName'], {}), '(outName)\n', (572, 581), False, 'from hydroDL.master import basins\n'), ((646, 700), 'hydroDL.master.basins.testModelSeq', 'basins.testModelSeq', (['outName', 'siteNoLst'], {'wqData': 'wqData'}), '(outName, siteNoLst, wqData=wqData)\n', (665, 700), False, 'from hydroDL.master import basins\n'), ((763, 782), 'numpy.ndarray', 'np.ndarray', (['[ns, 2]'], {}), '([ns, 2])\n', (773, 782), True, 'import numpy as np\n'), ((793, 812), 'numpy.ndarray', 'np.ndarray', (['[ns, 2]'], {}), '([ns, 2])\n', (803, 812), True, 'import numpy as np\n'), ((1085, 1154), 'hydroDL.data.gageII.readData', 'gageII.readData', ([], {'varLst': "['LAT_GAGE', 'LNG_GAGE']", 'siteNoLst': 'siteNoLst'}), "(varLst=['LAT_GAGE', 'LNG_GAGE'], siteNoLst=siteNoLst)\n", (1100, 1154), False, 'from hydroDL.data import gageII, usgs\n'), ((2054, 2079), 'importlib.reload', 'importlib.reload', (['figplot'], {}), '(figplot)\n', (2070, 2079), False, 'import importlib\n'), ((2093, 2129), 'hydroDL.post.figplot.clickMap', 'figplot.clickMap', (['funcMap', 'funcPoint'], {}), '(funcMap, funcPoint)\n', (2109, 2129), False, 'from hydroDL.post import axplot, figplot\n'), ((893, 924), 'hydroDL.master.basins.loadSeq', 'basins.loadSeq', (['outName', 'siteNo'], {}), '(outName, siteNo)\n', (907, 924), False, 'from hydroDL.master import basins\n'), ((950, 999), 'hydroDL.app.waterQuality.calErrSeq', 'waterQuality.calErrSeq', (['dfPred[code]', 'dfObs[code]'], {}), '(dfPred[code], dfObs[code])\n', (972, 999), False, 'from hydroDL.app import waterQuality, wqLinear\n'), ((1278, 1312), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(8, 6)'}), '(2, 1, figsize=(8, 6))\n', (1290, 1312), True, 'import matplotlib.pyplot as 
plt\n'), ((1317, 1387), 'hydroDL.post.axplot.mapPoint', 'axplot.mapPoint', (['axM[0]', 'lat', 'lon', '(corrMat[:, 0] - corrMat[:, 1])'], {'s': '(12)'}), '(axM[0], lat, lon, corrMat[:, 0] - corrMat[:, 1], s=12)\n', (1332, 1387), False, 'from hydroDL.post import axplot, figplot\n'), ((1390, 1444), 'hydroDL.post.axplot.mapPoint', 'axplot.mapPoint', (['axM[1]', 'lat', 'lon', 'corrMat[:, 1]'], {'s': '(12)'}), '(axM[1], lat, lon, corrMat[:, 1], s=12)\n', (1405, 1444), False, 'from hydroDL.post import axplot, figplot\n'), ((1461, 1495), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(8, 6)'}), '(1, 1, figsize=(8, 6))\n', (1473, 1495), True, 'import matplotlib.pyplot as plt\n'), ((1609, 1640), 'hydroDL.master.basins.loadSeq', 'basins.loadSeq', (['outName', 'siteNo'], {}), '(outName, siteNo)\n', (1623, 1640), False, 'from hydroDL.master import basins\n'), ((1660, 1707), 'hydroDL.app.waterQuality.calErrSeq', 'waterQuality.calErrSeq', (['dfP1[code]', 'dfObs[code]'], {}), '(dfP1[code], dfObs[code])\n', (1682, 1707), False, 'from hydroDL.app import waterQuality, wqLinear\n'), ((1746, 1773), 'numpy.datetime64', 'np.datetime64', (['"""2000-01-01"""'], {}), "('2000-01-01')\n", (1759, 1773), True, 'import numpy as np\n'), ((1778, 1889), 'hydroDL.post.axplot.plotTS', 'axplot.plotTS', (['axP', 't', '[dfP1[code], dfObs[code]]'], {'tBar': 'tBar', 'legLst': "['LSTM', 'obs']", 'styLst': '"""-*"""', 'cLst': '"""br"""'}), "(axP, t, [dfP1[code], dfObs[code]], tBar=tBar, legLst=['LSTM',\n 'obs'], styLst='-*', cLst='br')\n", (1791, 1889), False, 'from hydroDL.post import axplot, figplot\n'), ((2168, 2195), 'numpy.datetime64', 'np.datetime64', (['"""2010-01-01"""'], {}), "('2010-01-01')\n", (2181, 2195), True, 'import numpy as np\n'), ((2197, 2224), 'numpy.datetime64', 'np.datetime64', (['"""2015-01-01"""'], {}), "('2015-01-01')\n", (2210, 2224), True, 'import numpy as np\n'), ((2283, 2310), 'numpy.datetime64', 'np.datetime64', (['"""1990-01-01"""'], {}), 
"('1990-01-01')\n", (2296, 2310), True, 'import numpy as np\n'), ((2312, 2339), 'numpy.datetime64', 'np.datetime64', (['"""1995-01-01"""'], {}), "('1995-01-01')\n", (2325, 2339), True, 'import numpy as np\n'), ((2398, 2425), 'numpy.datetime64', 'np.datetime64', (['"""1980-01-01"""'], {}), "('1980-01-01')\n", (2411, 2425), True, 'import numpy as np\n'), ((2427, 2454), 'numpy.datetime64', 'np.datetime64', (['"""2020-01-01"""'], {}), "('2020-01-01')\n", (2440, 2454), True, 'import numpy as np\n')]
|
#!/bin/python3
import sys
import os
import Markov
if __name__ == "__main__":
    # Build (or extend) a Markov brain from a text corpus and dump it as JSON.
    argc = len(sys.argv)
    if argc < 2 or argc > 3:
        print("usage: {} sourcefile.txt [existingbrain.json]".format(
            sys.argv[0]))
        sys.exit(-1)
    corpus_path = sys.argv[1]
    if not os.path.exists(corpus_path):
        print("Can't find source corpus {}".format(corpus_path))
        sys.exit(-1)
    brain = Markov.Brain()
    # An optional second argument names an existing brain to merge into.
    if argc == 3:
        brain_path = sys.argv[2]
        if not os.path.exists(brain_path):
            print("Can't find brain '{}' to merge with".format(brain_path))
            sys.exit(-1)
        brain.loadExistingBrain(brain_path)
    brain.compileCorupus(corpus_path)
    print(brain.toJSON())
|
[
"Markov.Brain",
"os.path.exists",
"sys.exit"
] |
[((388, 402), 'Markov.Brain', 'Markov.Brain', ([], {}), '()\n', (400, 402), False, 'import Markov\n'), ((233, 245), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (241, 245), False, 'import sys\n'), ((259, 286), 'os.path.exists', 'os.path.exists', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (273, 286), False, 'import os\n'), ((362, 374), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (370, 374), False, 'import sys\n'), ((519, 546), 'os.path.exists', 'os.path.exists', (['sys.argv[2]'], {}), '(sys.argv[2])\n', (533, 546), False, 'import os\n'), ((638, 650), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (646, 650), False, 'import sys\n')]
|
import vne
from vne.constants import nfeat, nsensors
from vne.model import simpleModel, init_weights_simple
from vne.persist import save_model, load_model
import numpy as np
import torch
import os
def encode_save(sig=None, name='simpleModel_ini', dir_path="../src/vne/models"):
    """Create a simpleModel encoder, smoke-test it on a signal, and persist it.

    Parameters
    ----------
    sig:
        Optional numpy array shaped (nsamples, nfeatures, nsensors).  When
        omitted, a fresh random signal of shape (1, nfeat, nsensors) is drawn
        on each call.  (The old eager default ``np.random.random(...)`` was
        evaluated once at import time and then shared by every call.)
    name:
        Filename to save the model under.
    dir_path:
        Local directory to save the model in.

    Returns
    -------
    model:
        The encoder model that was generated (and saved).
    """
    if sig is None:
        # Draw the default lazily so each call gets its own random signal.
        sig = np.random.random([1, nfeat, nsensors])
    model = simpleModel().eval()
    model.apply(init_weights_simple)
    sig = torch.tensor(sig.astype(np.float32)).to('cpu')
    enc = model(sig)
    print("signal={}".format(sig))
    print("encoded={}".format(enc))
    # NOTE(review): re-applying the initializer here re-randomizes the weights,
    # so the saved model does not reproduce the encoding printed above --
    # confirm this is intentional before relying on the printed values.
    model.apply(vne.init_weights_simple)
    save_model(encoder=model, name=name, dir_path=dir_path)
    return model
def encode_load(sig=None, name="simpleModel_ini", dir_path="../src/vne/models"):
    """Load a previously saved encoder model from disk.

    Parameters
    ----------
    sig:
        Accepted for signature compatibility with :func:`encode_save` but not
        used by this function.  (The old eager random default was evaluated
        once at import time; it is now lazy and simply ignored.)
    name:
        The filename of the saved model.
    dir_path:
        The directory path to the folder containing the saved model file.

    Returns
    -------
    model:
        The loaded encoder model (see note about re-initialization below).
    """
    # load the saved model
    model = load_model(name, dir_path)
    # NOTE(review): applying the initializer to a freshly *loaded* model wipes
    # the persisted weights -- confirm this re-initialization is intentional.
    model.apply(vne.init_weights_simple)
    return model
# Function to Convert to ONNX
def Convert_ONNX(model=None, name="simpleModel_ini", dir_path="../src/vne/models"):
    """Export ``model`` to ONNX format at ``dir_path/name`` + ``.onnx``.

    Parameters
    ----------
    model:
        Encoder to export; when None the persisted model is loaded first.
    name:
        Base filename (without extension) for the exported model.
    dir_path:
        Directory to write the ``.onnx`` file into.
    """
    if model is None:
        model = encode_load()
    # set the model to inference mode (making sure)
    model.eval()
    name = os.path.join(dir_path, name)
    # Dummy input fixing the traced tensor rank: (batch, nfeat, nsensors).
    dummy_input = torch.randn(1, nfeat, nsensors, requires_grad=True)
    # Export the model.  dynamic_axes keys must match input_names/output_names;
    # the previous key 'modelInput' matched nothing, so the input batch axis
    # was exported with a fixed size of 1.
    torch.onnx.export(model,                      # model being run
                      dummy_input,                # model input (or a tuple for multiple inputs)
                      name + ".onnx",             # where to save the model
                      export_params=True,         # store the trained parameter weights inside the model file
                      opset_version=9,            # the ONNX version to export the model to
                      do_constant_folding=True,   # whether to execute constant folding for optimization
                      input_names=['sensorData'],     # the model's input names
                      output_names=['modelOutput'],   # the model's output names
                      dynamic_axes={'sensorData': {0: 'batch_size'},    # variable length axes
                                    'modelOutput': {0: 'batch_size'}})
    print(" ")
    print('Model has been converted to ONNX')
if __name__ == '__main__':
    # Build and persist a fresh encoder, smoke-test it, then export to ONNX.
    model_name = 'simpleModel_ini-Trivial19'
    model = encode_save(name=model_name)
    print("saved model")
    # Exercise the returned model on a random multi-sensor signal.
    sig = torch.tensor(
        np.random.random([1, nfeat, nsensors]).astype(np.float32)).to('cpu')
    enc = model(sig)
    print("signal={}".format(sig))
    print("encoded={}".format(enc))
    print("ran model")
    Convert_ONNX(model, name=model_name)
|
[
"vne.persist.save_model",
"torch.onnx.export",
"vne.model.simpleModel",
"torch.randn",
"vne.persist.load_model",
"numpy.random.random",
"os.path.join"
] |
[((219, 257), 'numpy.random.random', 'np.random.random', (['[1, nfeat, nsensors]'], {}), '([1, nfeat, nsensors])\n', (235, 257), True, 'import numpy as np\n'), ((1351, 1406), 'vne.persist.save_model', 'save_model', ([], {'encoder': 'model', 'name': 'name', 'dir_path': 'dir_path'}), '(encoder=model, name=name, dir_path=dir_path)\n', (1361, 1406), False, 'from vne.persist import save_model, load_model\n'), ((1446, 1484), 'numpy.random.random', 'np.random.random', (['[1, nfeat, nsensors]'], {}), '([1, nfeat, nsensors])\n', (1462, 1484), True, 'import numpy as np\n'), ((2249, 2275), 'vne.persist.load_model', 'load_model', (['name', 'dir_path'], {}), '(name, dir_path)\n', (2259, 2275), False, 'from vne.persist import save_model, load_model\n'), ((2626, 2654), 'os.path.join', 'os.path.join', (['dir_path', 'name'], {}), '(dir_path, name)\n', (2638, 2654), False, 'import os\n'), ((2713, 2764), 'torch.randn', 'torch.randn', (['(1)', 'nfeat', 'nsensors'], {'requires_grad': '(True)'}), '(1, nfeat, nsensors, requires_grad=True)\n', (2724, 2764), False, 'import torch\n'), ((2793, 3065), 'torch.onnx.export', 'torch.onnx.export', (['model', 'dummy_input', "(name + '.onnx')"], {'export_params': '(True)', 'opset_version': '(9)', 'do_constant_folding': '(True)', 'input_names': "['sensorData']", 'output_names': "['modelOutput']", 'dynamic_axes': "{'modelInput': {(0): 'batch_size'}, 'modelOutput': {(0): 'batch_size'}}"}), "(model, dummy_input, name + '.onnx', export_params=True,\n opset_version=9, do_constant_folding=True, input_names=['sensorData'],\n output_names=['modelOutput'], dynamic_axes={'modelInput': {(0):\n 'batch_size'}, 'modelOutput': {(0): 'batch_size'}})\n", (2810, 3065), False, 'import torch\n'), ((3865, 3903), 'numpy.random.random', 'np.random.random', (['[1, nfeat, nsensors]'], {}), '([1, nfeat, nsensors])\n', (3881, 3903), True, 'import numpy as np\n'), ((1077, 1090), 'vne.model.simpleModel', 'simpleModel', ([], {}), '()\n', (1088, 1090), False, 'from vne.model 
import simpleModel, init_weights_simple\n')]
|
import os
import argparse
import configargparse
import time
import glob
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import functional as VF
from torch.nn import functional as F
import cv2
import model
import data
from eval.kitti_depth_eval_utils import *
from eval.depth_eval_utils import *
from data import create_dataset
import opts
class DirDataset(Dataset):
    """Dataset over all ``*.jpg`` images directly inside a directory."""

    def __init__(self, dir, height, width, crop=None):
        '''
        dir: directory scanned (non-recursively) for *.jpg files
        height, width: target size images are resized to
        crop: (top, left, height, width) box applied after resizing
        '''
        # Bug fix: the original globbed the module-level ``args.input_dir``
        # instead of the ``dir`` argument, so the parameter was ignored and
        # the class only worked when a global ``args`` existed.
        self.filenames = glob.glob(os.path.join(dir, "*.jpg"))
        self.height = height
        self.width = width
        self.crop = crop

    def __getitem__(self, idx):
        # NOTE(review): ``Image`` (PIL) is referenced here but never imported
        # in this file -- ``from PIL import Image`` is needed at module level.
        img = Image.open(self.filenames[idx])
        if img.size[0] != self.width or img.size[1] != self.height:
            img = img.resize((self.width, self.height), resample=Image.LANCZOS)
        else:
            print("No resize required")
        if self.crop is not None:
            img = img.crop(
                (self.crop[1], self.crop[0], self.crop[1] + self.crop[3], self.crop[0] + self.crop[2]))
        img = VF.to_tensor(img)
        return {'path': self.filenames[idx], 'img': img}

    def __len__(self):
        return len(self.filenames)
if __name__ == '__main__':
    # Run monocular depth inference over a directory of images and write
    # colorized disparity maps (PNG) or raw arrays (NPZ) to the output dir.
    args = opts.parse_args()
    args.seq_len = 1
    args.workers = 0
    checkpoint = torch.load(args.checkpoint)
    os.makedirs(args.output_dir, exist_ok=True)
    # NOTE: this rebinds ``model`` and shadows the imported ``model`` module.
    model = checkpoint['model']
    model.to(args.device)
    model.eval()
    dataset = DirDataset(args.input_dir, args.height, args.width)
    dataloader = DataLoader(dataset, batch_size=12,
                            shuffle=False, num_workers=args.workers)
    for i, batch in enumerate(dataloader):
        with torch.no_grad():
            fnames, imgs = batch['path'], batch['img']
            imgs = imgs.to(args.device)
            # ImageNet mean/std normalization before feeding the depth net.
            imgs_normalized = VF.normalize(
                imgs, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            depths, _, _ = model.depth_net(imgs_normalized)
            depths = depths[0].cpu().numpy()
            depths = np.squeeze(depths, 1)
            assert len(depths), len(fnames)
            # Display scale: clip the top 5% of disparities.
            vmin = np.min(1 / depths)
            vmax = np.percentile(1 / depths, 95)
            disps = 1 / depths
            # NOTE(review): ``convert_util`` is never imported in this file --
            # this line raises NameError as written; confirm the intended module.
            disps_rgb = convert_util.gray_to_rgb_np(
                disps, cmap='magma', lb=vmin, ub=vmax)
            for j in range(len(fnames)):
                filename = fnames[j].split('/')[-1]
                outname_noext = os.path.join(args.output_dir, filename[:-4])
                if args.output_type == 'png':
                    cv2.imwrite(
                        outname_noext + '_pred.png',
                        255 * disps_rgb[j])
                else:
                    np.savez(outname_noext + '.npz', disps_rgb[j])
|
[
"torchvision.transforms.functional.normalize",
"model.to",
"os.makedirs",
"torch.utils.data.DataLoader",
"torchvision.transforms.functional.to_tensor",
"os.path.join",
"torch.load",
"cv2.imwrite",
"numpy.savez",
"numpy.percentile",
"numpy.min",
"model.eval",
"numpy.squeeze",
"model.depth_net",
"torch.no_grad",
"opts.parse_args"
] |
[((1346, 1363), 'opts.parse_args', 'opts.parse_args', ([], {}), '()\n', (1361, 1363), False, 'import opts\n'), ((1425, 1452), 'torch.load', 'torch.load', (['args.checkpoint'], {}), '(args.checkpoint)\n', (1435, 1452), False, 'import torch\n'), ((1458, 1501), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {'exist_ok': '(True)'}), '(args.output_dir, exist_ok=True)\n', (1469, 1501), False, 'import os\n'), ((1539, 1560), 'model.to', 'model.to', (['args.device'], {}), '(args.device)\n', (1547, 1560), False, 'import model\n'), ((1565, 1577), 'model.eval', 'model.eval', ([], {}), '()\n', (1575, 1577), False, 'import model\n'), ((1662, 1737), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(12)', 'shuffle': '(False)', 'num_workers': 'args.workers'}), '(dataset, batch_size=12, shuffle=False, num_workers=args.workers)\n', (1672, 1737), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((1170, 1187), 'torchvision.transforms.functional.to_tensor', 'VF.to_tensor', (['img'], {}), '(img)\n', (1182, 1187), True, 'from torchvision.transforms import functional as VF\n'), ((586, 623), 'os.path.join', 'os.path.join', (['args.input_dir', '"""*.jpg"""'], {}), "(args.input_dir, '*.jpg')\n", (598, 623), False, 'import os\n'), ((1823, 1838), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1836, 1838), False, 'import torch\n'), ((1965, 2038), 'torchvision.transforms.functional.normalize', 'VF.normalize', (['imgs'], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(imgs, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (1977, 2038), True, 'from torchvision.transforms import functional as VF\n'), ((2084, 2116), 'model.depth_net', 'model.depth_net', (['imgs_normalized'], {}), '(imgs_normalized)\n', (2099, 2116), False, 'import model\n'), ((2183, 2204), 'numpy.squeeze', 'np.squeeze', (['depths', '(1)'], {}), '(depths, 1)\n', (2193, 2204), True, 'import numpy as np\n'), ((2270, 2288), 'numpy.min', 'np.min', (['(1 / 
depths)'], {}), '(1 / depths)\n', (2276, 2288), True, 'import numpy as np\n'), ((2308, 2337), 'numpy.percentile', 'np.percentile', (['(1 / depths)', '(95)'], {}), '(1 / depths, 95)\n', (2321, 2337), True, 'import numpy as np\n'), ((2604, 2648), 'os.path.join', 'os.path.join', (['args.output_dir', 'filename[:-4]'], {}), '(args.output_dir, filename[:-4])\n', (2616, 2648), False, 'import os\n'), ((2715, 2775), 'cv2.imwrite', 'cv2.imwrite', (["(outname_noext + '_pred.png')", '(255 * disps_rgb[j])'], {}), "(outname_noext + '_pred.png', 255 * disps_rgb[j])\n", (2726, 2775), False, 'import cv2\n'), ((2867, 2913), 'numpy.savez', 'np.savez', (["(outname_noext + '.npz')", 'disps_rgb[j]'], {}), "(outname_noext + '.npz', disps_rgb[j])\n", (2875, 2913), True, 'import numpy as np\n')]
|
import filecmp
import glob
import os
import platform
import re
import shutil
import unittest
import zipfile
import update_searchable_options
class SearchableOptionTests(unittest.TestCase):
  """Tests searchable options to be up-to-date.

  This test purpose is to generate these files, so whenever a
  configurable or an action description changes we can use this
  test to keep the files up-to-date.
  """

  def test_searchable_options(self):
    work_dir = os.getenv("TEST_TMPDIR")
    expected_dir = os.path.join(work_dir, "expected")
    plugin_list = update_searchable_options.generate_searchable_options(work_dir, expected_dir)

    # Build the actual tree out of the installed plugin jars.
    plugin_path = {
        "Windows": "android-studio/plugins",
        "Linux": "android-studio/plugins",
        "Darwin": "Android Studio*.app/Contents/plugins",
    }
    actual_dir = os.path.join(work_dir, "actual")
    [plugins_dir] = glob.glob(os.path.join(work_dir, plugin_path[platform.system()]))
    for plugin in os.listdir(plugins_dir):
      if plugin not in plugin_list:
        continue
      lib_dir = os.path.join(plugins_dir, plugin, "lib")
      jars = [j for j in os.listdir(lib_dir) if j.endswith(".jar")]
      for jar in jars:
        with zipfile.ZipFile(os.path.join(lib_dir, jar)) as jar_file:
          found_xml = False
          found_dir_entry = False
          for name in jar_file.namelist():
            if re.match(r"search/.*searchableOptions\.xml", name):
              jar_file.extract(name, path=os.path.join(actual_dir, plugin, jar))
              found_xml = True
            if name == "search/":
              found_dir_entry = True
          if found_xml and not found_dir_entry:
            self.fail("Jar %s contains searchable options xmls, but it does " % jar +
                      "not have a search/ directory entry. IntelliJ requires the directory entry to find the .xmls")

    if self.same_folders(filecmp.dircmp(expected_dir, actual_dir)):
      return
    print("Searchable options comparison failed.")
    print("The expected output is in outputs.zip, please update tools/adt/idea/searchable-options with it.")
    print("Alternatively, if you are on Linux you can run: bazel run //tools/adt/idea/searchable-options:update_searchable_options")
    undeclared_outputs = os.getenv("TEST_UNDECLARED_OUTPUTS_DIR")
    for name in os.listdir(expected_dir):
      shutil.copytree(os.path.join(expected_dir, name), os.path.join(undeclared_outputs, name))
    self.fail("Searchable options differ")

  def same_folders(self, diff):
    """Recursively check a filecmp.dircmp result for any differing files."""
    if diff.diff_files:
      return False
    return all(self.same_folders(sub) for sub in diff.subdirs.values())
if __name__ == "__main__":
  # Allow running this test file directly (outside the bazel test runner).
  unittest.main()
|
[
"unittest.main",
"os.listdir",
"re.match",
"platform.system",
"update_searchable_options.generate_searchable_options",
"os.path.join",
"os.getenv",
"filecmp.dircmp"
] |
[((2802, 2817), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2815, 2817), False, 'import unittest\n'), ((463, 487), 'os.getenv', 'os.getenv', (['"""TEST_TMPDIR"""'], {}), "('TEST_TMPDIR')\n", (472, 487), False, 'import os\n'), ((507, 541), 'os.path.join', 'os.path.join', (['work_dir', '"""expected"""'], {}), "(work_dir, 'expected')\n", (519, 541), False, 'import os\n'), ((561, 638), 'update_searchable_options.generate_searchable_options', 'update_searchable_options.generate_searchable_options', (['work_dir', 'expected_dir'], {}), '(work_dir, expected_dir)\n', (614, 638), False, 'import update_searchable_options\n'), ((848, 880), 'os.path.join', 'os.path.join', (['work_dir', '"""actual"""'], {}), "(work_dir, 'actual')\n", (860, 880), False, 'import os\n'), ((985, 1008), 'os.listdir', 'os.listdir', (['plugins_dir'], {}), '(plugins_dir)\n', (995, 1008), False, 'import os\n'), ((1962, 2002), 'filecmp.dircmp', 'filecmp.dircmp', (['expected_dir', 'actual_dir'], {}), '(expected_dir, actual_dir)\n', (1976, 2002), False, 'import filecmp\n'), ((2345, 2385), 'os.getenv', 'os.getenv', (['"""TEST_UNDECLARED_OUTPUTS_DIR"""'], {}), "('TEST_UNDECLARED_OUTPUTS_DIR')\n", (2354, 2385), False, 'import os\n'), ((2404, 2428), 'os.listdir', 'os.listdir', (['expected_dir'], {}), '(expected_dir)\n', (2414, 2428), False, 'import os\n'), ((1060, 1100), 'os.path.join', 'os.path.join', (['plugins_dir', 'plugin', '"""lib"""'], {}), "(plugins_dir, plugin, 'lib')\n", (1072, 1100), False, 'import os\n'), ((1120, 1139), 'os.listdir', 'os.listdir', (['lib_dir'], {}), '(lib_dir)\n', (1130, 1139), False, 'import os\n'), ((946, 963), 'platform.system', 'platform.system', ([], {}), '()\n', (961, 963), False, 'import platform\n'), ((2454, 2486), 'os.path.join', 'os.path.join', (['expected_dir', 'name'], {}), '(expected_dir, name)\n', (2466, 2486), False, 'import os\n'), ((2488, 2526), 'os.path.join', 'os.path.join', (['undeclared_outputs', 'name'], {}), '(undeclared_outputs, name)\n', (2500, 
2526), False, 'import os\n'), ((1209, 1235), 'os.path.join', 'os.path.join', (['lib_dir', 'jar'], {}), '(lib_dir, jar)\n', (1221, 1235), False, 'import os\n'), ((1400, 1450), 're.match', 're.match', (['"""search/.*searchableOptions\\\\.xml"""', 'name'], {}), "('search/.*searchableOptions\\\\.xml', name)\n", (1408, 1450), False, 'import re\n'), ((1498, 1535), 'os.path.join', 'os.path.join', (['actual_dir', 'plugin', 'jar'], {}), '(actual_dir, plugin, jar)\n', (1510, 1535), False, 'import os\n')]
|
"""Armijo rule."""
import numpy as np
from optimus.types import LRMethod, Function
class Armijo(LRMethod):
    """Armijo method for finding Learning Rate.

    This method successively reduces the learning rate until it finds the
    resulting change to be as good as a linear approximation of the function.
    """

    def __init__(
        self,
        initial_lr: float,
        tolerance: float,
        decrease_factor: float,
        max_iters: int = 10,
    ):
        self.initial_lr = initial_lr
        self.tolerance = tolerance
        self.decrease_factor = decrease_factor
        # Upper bound on backtracking reductions; prevents an infinite loop
        # when the sufficient-decrease condition can never be satisfied.
        self.max_iters = max_iters

    def __call__(
        self,
        parameters: np.ndarray,
        function_value: float,
        gradient: np.ndarray,
        direction: np.ndarray,
        step: int,
        objective_function: Function,
    ) -> float:
        """Backtrack from ``initial_lr`` until the Armijo condition holds.

        Bug fix: the original looped unboundedly and ignored ``max_iters``;
        the loop is now capped at ``max_iters`` reductions.
        """
        lr = self.initial_lr

        def new_value(lr):
            # Actual decrease achieved by stepping with this learning rate.
            return function_value - objective_function(parameters - lr * direction)

        def desired_value(lr):
            # First-order (linear) decrease, scaled by the tolerance.
            return self.tolerance * lr * np.dot(gradient, direction)

        for _ in range(self.max_iters):
            if new_value(lr) >= desired_value(lr):
                break
            lr *= self.decrease_factor
        return lr
|
[
"numpy.dot"
] |
[((1071, 1098), 'numpy.dot', 'np.dot', (['gradient', 'direction'], {}), '(gradient, direction)\n', (1077, 1098), True, 'import numpy as np\n')]
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
from osc_lib import exceptions
from esileapclient.common import base
# Module-level logger for this client module.
LOG = logging.getLogger(__name__)
class Offer(base.Resource):
    """Client-side representation of an ESI-Leap offer resource."""

    # Full field set (API attribute name -> display label) used for
    # detailed/"show"-style output.
    detailed_fields = {
        'availabilities': "Availabilities",
        'end_time': "End Time",
        'lessee': "Lessee",
        'lessee_id': "Lessee ID",
        'name': "Name",
        'parent_lease_uuid': "Parent Lease UUID",
        'project': "Project",
        'project_id': "Project ID",
        'properties': "Properties",
        'resource': "Resource",
        'resource_type': "Resource Type",
        'resource_uuid': "Resource UUID",
        'start_time': "Start Time",
        'status': "Status",
        'uuid': "UUID",
    }

    # Reduced field set used for "list"-style output.
    fields = {
        'uuid': "UUID",
        'resource': "Resource",
        'lessee': "Lessee",
        'start_time': "Start Time",
        'end_time': "End Time",
        'status': "Status",
        'availabilities': "Availabilities",
    }

    # Attributes the API accepts when creating an offer.
    _creation_attributes = ['resource_type', 'resource_uuid',
                            'start_time', 'end_time', 'status',
                            'project_id', 'properties', 'name',
                            'lessee_id']

    def __repr__(self):
        return "<Offer %s>" % self._info
class OfferManager(base.Manager):
    """API interactions for the ``offers`` resource."""

    resource_class = Offer
    _resource_name = 'offers'

    def create(self, os_esileap_api_version=None, **kwargs):
        """Create an offer based on a kwargs dictionary of attributes.

        :returns: a :class: `Offer` object
        """
        return self._create(os_esileap_api_version=os_esileap_api_version,
                            **kwargs)

    def list(self, filters, os_esileap_api_version=None):
        """Retrieve a list of offers.

        :returns: A list of offers.
        """
        url = self._path('') + OfferManager._url_variables(filters)
        offers = self._list(url,
                            os_esileap_api_version=os_esileap_api_version)
        # Only a plain list is returned to the caller; anything else yields None.
        if type(offers) is list:
            return offers

    def get(self, offer_uuid):
        """Get an offer with the specified identifier.

        :param offer_uuid: The uuid of an offer.
        :returns: a :class:`Offer` object.
        """
        return self._get(offer_uuid)

    def delete(self, offer_uuid):
        """Delete an offer with the specified identifier.

        :param offer_uuid: The uuid of an offer.
        """
        self._delete(resource_id=offer_uuid)

    def claim(self, offer_uuid, **kwargs):
        """Claim an offer with the specified identifier.

        :param offer_uuid: The uuid of an offer.
        :returns: a :class:`Offer` object.
        """
        url = self._path(offer_uuid) + "/claim"
        resp, body = self.api.json_request('POST', url, body=kwargs)
        # Anything other than 201 Created carries an error body.
        if resp.status_code != 201:
            raise exceptions.CommandError(json.loads(resp.text)['faultstring'])
        return self.resource_class(self, body)
|
[
"json.loads",
"logging.getLogger"
] |
[((678, 705), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (695, 705), False, 'import logging\n'), ((3666, 3687), 'json.loads', 'json.loads', (['resp.text'], {}), '(resp.text)\n', (3676, 3687), False, 'import json\n')]
|
#!/usr/bin/python3
from pygame import mixer
import re, os, signal
import paho.mqtt.client as mqtt
import config
class Player():
    """Thin wrapper around pygame's mixer, plus a forked VLC for web streams."""

    def __init__(self, path=""):
        self.__path = path
        self.__mixer = mixer
        self.__mixer.init()
        self.__vlc_pid = None
        self.__vlc_url = None

    def setPath(self, path):
        # Remember the file to load on the next play().
        self.__path = path

    def play(self):
        music = self.__mixer.music
        music.load(self.__path)
        music.play()

    def stop(self):
        self.__mixer.music.stop()

    def pause(self):
        self.__mixer.music.pause()

    def resume(self):
        self.__mixer.music.unpause()

    def playWeb(self, url):
        if self.__vlc_pid is not None:
            # Already streaming: stop the current stream and start over.
            self.stopWeb()
            self.playWeb(url)
            return
        self.__vlc_url = url
        self.__vlc_pid = os.fork()
        if self.__vlc_pid == 0:
            # Child process: run VLC headless until the stream ends.
            os.system("cvlc --intf dummy " + self.__vlc_url)
            os._exit(0)

    def stopWeb(self):
        # NOTE: kills every vlc process on the machine, not only ours.
        os.system("killall vlc -9")
        self.__vlc_pid = None
# Default player instance shared by the MQTT callbacks below.
player = Player("music.flac")
# Connect to the broker and subscribe to the control topic.
mqtt_client = mqtt.Client(config.HOSTNAME)
mqtt_client.username_pw_set(config.MQTT_USERNAME, config.MQTT_PASSWORD)
mqtt_client.connect(config.MQTT_BROKER, config.MQTT_BROKER_PORT)
mqtt_client.subscribe(config.MQTT_TOPIC, config.MQTT_QOS)
# Define event callbacks
def on_connect(client, userdata, flags, rc):
    # Log the broker's connection result code.
    print(f"rc: {rc}")
def on_message(client, obj, msg):
    """Dispatch a playback command received over MQTT.

    Supported payloads: "play web <url>", "play <path>", "play", "pause",
    "resume", "stop", "stop web".
    """
    print(msg.topic + " " + str(msg.qos) + " " + str(msg.payload))
    request = msg.payload.decode("utf-8")
    play_web = re.match(r'play web (.*)', request, re.I)
    if play_web:
        player.playWeb(play_web.group(1))
        # Bug fix: without returning here, "play web <url>" also matched the
        # generic "play (.*)" pattern below, so the mixer then tried to load
        # the nonexistent local file "web <url>".
        return
    play = re.match(r'play (.*)', request, re.I)
    if play:
        player.setPath(play.group(1))
        player.play()
    elif (request == "play"):
        player.play()
    elif (request == "pause"):
        player.pause()
    elif (request == "resume"):
        player.resume()
    elif (request == "stop"):
        player.stop()
    elif (request == "stop web"):
        player.stopWeb()
def on_publish(client, obj, mid):
    # Confirm a published message by its message id.
    print(f"mid: {mid}")
def on_subscribe(client, obj, mid, granted_qos):
    # Report a successful subscription and the QoS the broker granted.
    print(f"Subscribed: {mid} {granted_qos}")
def on_log(client, obj, level, string):
    # Pass the paho client's log line straight through to stdout.
    print(string)
# Assign event callbacks
mqtt_client.on_message = on_message
mqtt_client.on_connect = on_connect
mqtt_client.on_publish = on_publish
mqtt_client.on_subscribe = on_subscribe
# (on_log is defined above but not hooked up here.)
def main():
    # Pump the MQTT network loop forever; report (but survive) transient
    # errors so a single network hiccup does not kill the daemon.
    while True:
        try:
            mqtt_client.loop()
        except Exception as e:
            print(e)


if __name__ == "__main__":
    main()
|
[
"os.system",
"re.match",
"os._exit",
"os.fork",
"paho.mqtt.client.Client"
] |
[((1113, 1141), 'paho.mqtt.client.Client', 'mqtt.Client', (['config.HOSTNAME'], {}), '(config.HOSTNAME)\n', (1124, 1141), True, 'import paho.mqtt.client as mqtt\n'), ((1597, 1637), 're.match', 're.match', (['"""play web (.*)"""', 'request', 're.I'], {}), "('play web (.*)', request, re.I)\n", (1605, 1637), False, 'import re, os, signal\n'), ((1711, 1747), 're.match', 're.match', (['"""play (.*)"""', 'request', 're.I'], {}), "('play (.*)', request, re.I)\n", (1719, 1747), False, 'import re, os, signal\n'), ((1008, 1035), 'os.system', 'os.system', (['"""killall vlc -9"""'], {}), "('killall vlc -9')\n", (1017, 1035), False, 'import re, os, signal\n'), ((766, 775), 'os.fork', 'os.fork', ([], {}), '()\n', (773, 775), False, 'import re, os, signal\n'), ((828, 876), 'os.system', 'os.system', (["('cvlc --intf dummy ' + self.__vlc_url)"], {}), "('cvlc --intf dummy ' + self.__vlc_url)\n", (837, 876), False, 'import re, os, signal\n'), ((893, 904), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (901, 904), False, 'import re, os, signal\n')]
|
import unittest
import binary_search
class test_binary_search(unittest.TestCase):
    """Unit tests for binary_search.binary_search."""

    def test_binary_search_returns_true_when_search_key_found(self):
        result = binary_search.binary_search([3, 1, 2], 2)
        self.assertTrue(result)

    def test_binary_search_returns_false_when_search_key_not_found(self):
        result = binary_search.binary_search([3, 1, 2], 0)
        self.assertFalse(result)
|
[
"binary_search.binary_search"
] |
[((177, 218), 'binary_search.binary_search', 'binary_search.binary_search', (['[3, 1, 2]', '(2)'], {}), '([3, 1, 2], 2)\n', (204, 218), False, 'import binary_search\n'), ((320, 361), 'binary_search.binary_search', 'binary_search.binary_search', (['[3, 1, 2]', '(0)'], {}), '([3, 1, 2], 0)\n', (347, 361), False, 'import binary_search\n')]
|
from astropy.cosmology.funcs import z_at_value
import numpy as np
from pyHalo.single_realization import SingleHalo
import numpy.testing as npt
import numpy as np
from pyHalo.Halos.HaloModels.ULDM import ULDMFieldHalo, ULDMSubhalo
from pyHalo.Halos.lens_cosmo import LensCosmo
from pyHalo.Cosmology.cosmology import Cosmology
from lenstronomy.LensModel.Profiles.cnfw import CNFW
from lenstronomy.LensModel.Profiles.nfw import NFW
from lenstronomy.LensModel.Profiles.uldm import Uldm
import pytest
class TestULDMHalo(object):
    """Tests for the ULDM (ultra-light dark matter) halo profile classes."""

    def setup(self):
        # Build one field halo and one subhalo with identical parameters so
        # the tests below can compare their behavior.
        mass = 1e9
        x = 0.5
        y = 1.
        r3d = np.sqrt(1 + 0.5 ** 2 + 70**2)
        self.r3d = r3d
        self.z = 0.25
        sub_flag = True
        mdef = 'ULDM'
        self.H0 = 70
        self.omega_baryon = 0.03
        self.omega_DM = 0.25
        self.sigma8 = 0.82
        curvature = 'flat'
        self.ns = 0.9608
        cosmo_params = {'H0': self.H0, 'Om0': self.omega_baryon + self.omega_DM, 'Ob0': self.omega_baryon,
                      'sigma8': self.sigma8, 'ns': self.ns, 'curvature': curvature}
        self._dm, self._bar = self.omega_DM, self.omega_baryon
        cosmo = Cosmology(cosmo_kwargs=cosmo_params)
        # Lens at self.z, source at z=2.
        self.lens_cosmo = LensCosmo(self.z, 2., cosmo)
        profile_args = {'RocheNorm': 1.2, 'RocheNu': 2/3,
                        'evaluate_mc_at_zlens': False,
                        'log_mc': None, 'c_scale': 60.,
                        'c_power': -0.17, 'c_scatter': False,
                        'mc_model': 'diemer19', 'LOS_truncation_factor': 40,
                        'c_scatter_dex': 0.1, 'mc_mdef': '200c',
                        'log10_m_uldm':-22, 'uldm_plaw':1/3}
        self.subhalo = ULDMSubhalo(mass, x, y, r3d, mdef, self.z,
                                   sub_flag, self.lens_cosmo,
                                   profile_args, unique_tag=np.random.rand())
        self.fieldhalo = ULDMFieldHalo(mass, x, y, r3d, mdef, self.z,
                                       sub_flag, self.lens_cosmo,
                                       profile_args, unique_tag=np.random.rand())

    def test_lenstronomy_ID(self):
        # Both halo types should map to a composite CNFW + ULDM lens model.
        ID = self.fieldhalo.lenstronomy_ID
        npt.assert_string_equal(ID[0], 'CNFW')
        npt.assert_string_equal(ID[1], 'ULDM')

        ID = self.subhalo.lenstronomy_ID
        npt.assert_string_equal(ID[0], 'CNFW')
        npt.assert_string_equal(ID[1], 'ULDM')

    def test_redshift_eval(self):
        z_subhalo = self.subhalo.z_eval
        z_field = self.fieldhalo.z_eval
        npt.assert_equal(z_field, self.z)
        # because the concentration is evaluated at infall, and z_infall > z
        npt.assert_equal(True, z_subhalo > z_field)

    def test_profile_load(self):
        # test cored composite profile
        profile_args = {'log10_m_uldm': -22, 'uldm_plaw': 1/3, 'scale_nfw':False}
        single_halo = SingleHalo(1e8, 0.5, 0.5, 'ULDM', 0.5, 0.5, 1.5, None, True, profile_args, None)
        lens_model_list, redshift_array, kwargs_lens, numerical_interp = single_halo.\
            lensing_quantities(add_mass_sheet_correction=False)
        npt.assert_string_equal(lens_model_list[1], 'ULDM')
        npt.assert_string_equal(lens_model_list[0], 'CNFW')
        npt.assert_equal(True, len(kwargs_lens)==2)
        npt.assert_equal(True, len(redshift_array)==2)

    def test_profile_normalization(self):
        """
        Test that the mass enclosed within r200 of the composite profile is correct
        and check that the ULDM core density is correct.
        """
        profile_args = {'log10_m_uldm': -21, 'uldm_plaw': 1/3, 'scale_nfw':True}
        mass = 1e10
        zl = 0.5
        zs = 1.5
        single_halo = SingleHalo(mass, 0.5, 0.5, 'ULDM', zl, zl, zs, None, True, profile_args, None)
        _, _, kwargs_lens, _ = single_halo.lensing_quantities(add_mass_sheet_correction=False)
        Rs_angle, _ = single_halo.halos[0].lens_cosmo.nfw_physical2angle(mass, single_halo.halos[0].c, zl)
        sigma_crit = single_halo.halos[0].lens_cosmo.sigmacrit
        r200 = single_halo.halos[0].c * Rs_angle
        cnfw_kwargs, uldm_kwargs = kwargs_lens
        # Total 3D mass inside r200 must be shared between the two components.
        M_nfw = CNFW().mass_3d_lens(r200, cnfw_kwargs['Rs'], cnfw_kwargs['alpha_Rs']*sigma_crit, cnfw_kwargs['r_core'])
        M_uldm = Uldm().mass_3d_lens(r200, uldm_kwargs['kappa_0']*sigma_crit, uldm_kwargs['theta_c'])
        npt.assert_almost_equal((M_uldm+M_nfw)/mass,1,decimal=2) # less than 1% error
        _,theta_c,kappa_0 = single_halo.halos[0].profile_args
        # Central density of the composite must match the target core density.
        rho0 = Uldm().density_lens(0,uldm_kwargs['kappa_0'],
                                   uldm_kwargs['theta_c'])
        rhos = CNFW().density_lens(0,cnfw_kwargs['Rs'],
                                   cnfw_kwargs['alpha_Rs'],
                                   cnfw_kwargs['r_core'])
        rho_goal = Uldm().density_lens(0,kappa_0,theta_c)
        npt.assert_array_less(np.array([1-(rho0+rhos)/rho_goal]),np.array([0.02])) # less than 2% error
if __name__ == '__main__':
    # Allow running this test module directly.
    pytest.main()
|
[
"pyHalo.Cosmology.cosmology.Cosmology",
"lenstronomy.LensModel.Profiles.uldm.Uldm",
"lenstronomy.LensModel.Profiles.cnfw.CNFW",
"numpy.testing.assert_almost_equal",
"pyHalo.single_realization.SingleHalo",
"pytest.main",
"numpy.testing.assert_string_equal",
"numpy.array",
"numpy.testing.assert_equal",
"numpy.random.rand",
"pyHalo.Halos.lens_cosmo.LensCosmo",
"numpy.sqrt"
] |
[((5016, 5029), 'pytest.main', 'pytest.main', ([], {}), '()\n', (5027, 5029), False, 'import pytest\n'), ((613, 644), 'numpy.sqrt', 'np.sqrt', (['(1 + 0.5 ** 2 + 70 ** 2)'], {}), '(1 + 0.5 ** 2 + 70 ** 2)\n', (620, 644), True, 'import numpy as np\n'), ((1168, 1204), 'pyHalo.Cosmology.cosmology.Cosmology', 'Cosmology', ([], {'cosmo_kwargs': 'cosmo_params'}), '(cosmo_kwargs=cosmo_params)\n', (1177, 1204), False, 'from pyHalo.Cosmology.cosmology import Cosmology\n'), ((1231, 1260), 'pyHalo.Halos.lens_cosmo.LensCosmo', 'LensCosmo', (['self.z', '(2.0)', 'cosmo'], {}), '(self.z, 2.0, cosmo)\n', (1240, 1260), False, 'from pyHalo.Halos.lens_cosmo import LensCosmo\n'), ((2208, 2246), 'numpy.testing.assert_string_equal', 'npt.assert_string_equal', (['ID[0]', '"""CNFW"""'], {}), "(ID[0], 'CNFW')\n", (2231, 2246), True, 'import numpy.testing as npt\n'), ((2255, 2293), 'numpy.testing.assert_string_equal', 'npt.assert_string_equal', (['ID[1]', '"""ULDM"""'], {}), "(ID[1], 'ULDM')\n", (2278, 2293), True, 'import numpy.testing as npt\n'), ((2344, 2382), 'numpy.testing.assert_string_equal', 'npt.assert_string_equal', (['ID[0]', '"""CNFW"""'], {}), "(ID[0], 'CNFW')\n", (2367, 2382), True, 'import numpy.testing as npt\n'), ((2391, 2429), 'numpy.testing.assert_string_equal', 'npt.assert_string_equal', (['ID[1]', '"""ULDM"""'], {}), "(ID[1], 'ULDM')\n", (2414, 2429), True, 'import numpy.testing as npt\n'), ((2554, 2587), 'numpy.testing.assert_equal', 'npt.assert_equal', (['z_field', 'self.z'], {}), '(z_field, self.z)\n', (2570, 2587), True, 'import numpy.testing as npt\n'), ((2673, 2716), 'numpy.testing.assert_equal', 'npt.assert_equal', (['(True)', '(z_subhalo > z_field)'], {}), '(True, z_subhalo > z_field)\n', (2689, 2716), True, 'import numpy.testing as npt\n'), ((2897, 2989), 'pyHalo.single_realization.SingleHalo', 'SingleHalo', (['(100000000.0)', '(0.5)', '(0.5)', '"""ULDM"""', '(0.5)', '(0.5)', '(1.5)', 'None', '(True)', 'profile_args', 'None'], {}), "(100000000.0, 0.5, 0.5, 
'ULDM', 0.5, 0.5, 1.5, None, True,\n profile_args, None)\n", (2907, 2989), False, 'from pyHalo.single_realization import SingleHalo\n'), ((3137, 3188), 'numpy.testing.assert_string_equal', 'npt.assert_string_equal', (['lens_model_list[1]', '"""ULDM"""'], {}), "(lens_model_list[1], 'ULDM')\n", (3160, 3188), True, 'import numpy.testing as npt\n'), ((3197, 3248), 'numpy.testing.assert_string_equal', 'npt.assert_string_equal', (['lens_model_list[0]', '"""CNFW"""'], {}), "(lens_model_list[0], 'CNFW')\n", (3220, 3248), True, 'import numpy.testing as npt\n'), ((3722, 3800), 'pyHalo.single_realization.SingleHalo', 'SingleHalo', (['mass', '(0.5)', '(0.5)', '"""ULDM"""', 'zl', 'zl', 'zs', 'None', '(True)', 'profile_args', 'None'], {}), "(mass, 0.5, 0.5, 'ULDM', zl, zl, zs, None, True, profile_args, None)\n", (3732, 3800), False, 'from pyHalo.single_realization import SingleHalo\n'), ((4392, 4454), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['((M_uldm + M_nfw) / mass)', '(1)'], {'decimal': '(2)'}), '((M_uldm + M_nfw) / mass, 1, decimal=2)\n', (4415, 4454), True, 'import numpy.testing as npt\n'), ((4911, 4951), 'numpy.array', 'np.array', (['[1 - (rho0 + rhos) / rho_goal]'], {}), '([1 - (rho0 + rhos) / rho_goal])\n', (4919, 4951), True, 'import numpy as np\n'), ((4946, 4962), 'numpy.array', 'np.array', (['[0.02]'], {}), '([0.02])\n', (4954, 4962), True, 'import numpy as np\n'), ((1884, 1900), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1898, 1900), True, 'import numpy as np\n'), ((2102, 2118), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2116, 2118), True, 'import numpy as np\n'), ((4178, 4184), 'lenstronomy.LensModel.Profiles.cnfw.CNFW', 'CNFW', ([], {}), '()\n', (4182, 4184), False, 'from lenstronomy.LensModel.Profiles.cnfw import CNFW\n'), ((4299, 4305), 'lenstronomy.LensModel.Profiles.uldm.Uldm', 'Uldm', ([], {}), '()\n', (4303, 4305), False, 'from lenstronomy.LensModel.Profiles.uldm import Uldm\n'), ((4547, 4553), 
'lenstronomy.LensModel.Profiles.uldm.Uldm', 'Uldm', ([], {}), '()\n', (4551, 4553), False, 'from lenstronomy.LensModel.Profiles.uldm import Uldm\n'), ((4668, 4674), 'lenstronomy.LensModel.Profiles.cnfw.CNFW', 'CNFW', ([], {}), '()\n', (4672, 4674), False, 'from lenstronomy.LensModel.Profiles.cnfw import CNFW\n'), ((4842, 4848), 'lenstronomy.LensModel.Profiles.uldm.Uldm', 'Uldm', ([], {}), '()\n', (4846, 4848), False, 'from lenstronomy.LensModel.Profiles.uldm import Uldm\n')]
|
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import get_user_model
class UserForm(UserCreationForm):
class Meta:
model = get_user_model()
fields = ('username', '<PASSWORD>', '<PASSWORD>', )
|
[
"django.contrib.auth.get_user_model"
] |
[((169, 185), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (183, 185), False, 'from django.contrib.auth import get_user_model\n')]
|
from generator import Generator
import numpy as np, random
np.set_printoptions(precision=4, suppress=True, linewidth=132)
from tensorflow import keras
from tensorflow.keras.layers import LSTM, Dense, Input
from tensorflow.keras import Model
from tensorflow.keras.optimizers import Adagrad
def create_net(nwords, batch_size, hidden=100):
inp = Input((None, nwords), batch_size=batch_size)
r1 = LSTM(hidden, return_sequences=True, stateful=True)(inp)
#r2 = LSTM(hidden, return_sequences=True)(r1)
probs = Dense(nwords, activation="softmax")(r1)
model = Model(inp, probs)
model.compile(optimizer=Adagrad(learning_rate=0.01), loss="categorical_crossentropy")
return model
def generate_from_model(model, g, length, batch_size):
#print("------- generate ----------")
model.reset_states()
nwords = g.NWords
rows = []
row = [random.randint(0, nwords-1) for _ in range(batch_size)] # [w]
rows.append(row)
for t in range(length-1):
x = np.array([g.vectorize(xi) for xi in row])
y = model.predict(x[:,None,:])[:,0,:] # y: [mb, w], t=0
pvec = y**3
pvec = pvec/np.sum(pvec, axis=-1, keepdims=True) # -> [mb, w]
row = [np.random.choice(nwords, p=p) for p in pvec]
rows.append(row)
rows = np.array(rows) # [t,mb]
return rows.transpose((1,0))
def generate_batch(g, length, batch_size):
#print("generate_batch(%s, %s)..." % (length, batch_size))
sequences = np.array([g.generate(length+1, as_vectors=True) for _ in range(batch_size)])
#print("sequences:", sequences.shape)
x = sequences[:,:-1,:]
y_ = sequences[:,1:,:]
return x, y_
def train(model, g, length, batch_size):
valid_ma = 0.0
steps = 0
for iteration in range(100000):
#print
x, y_ = generate_batch(g, length, batch_size)
loss = model.train_on_batch(x, y_)
if iteration and iteration % 50 == 0:
generated = generate_from_model(model, g, length, batch_size)[0]
#print(type(generated), generated.shape, generated)
valid_length = g.validate(generated)
valid_ma += 0.1*(valid_length-valid_ma)
if iteration % 100 == 0:
print(generated[:valid_length], "*", generated[valid_length:], " valid length:", valid_length)
print("Batches:", iteration, " steps:", iteration*length*batch_size, " loss/step:", loss/x.shape[1],
" moving average:", valid_ma)
if __name__ == '__main__':
nwords = 10
length = 50
distance = 5
r = 2
batch_size = 5
g = Generator(nwords, distance, r)
model = create_net(nwords, batch_size)
train(model, g, length, batch_size)
|
[
"numpy.set_printoptions",
"numpy.sum",
"random.randint",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Model",
"numpy.array",
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.LSTM",
"numpy.random.choice",
"generator.Generator",
"tensorflow.keras.optimizers.Adagrad"
] |
[((60, 122), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(4)', 'suppress': '(True)', 'linewidth': '(132)'}), '(precision=4, suppress=True, linewidth=132)\n', (79, 122), True, 'import numpy as np, random\n'), ((350, 394), 'tensorflow.keras.layers.Input', 'Input', (['(None, nwords)'], {'batch_size': 'batch_size'}), '((None, nwords), batch_size=batch_size)\n', (355, 394), False, 'from tensorflow.keras.layers import LSTM, Dense, Input\n'), ((574, 591), 'tensorflow.keras.Model', 'Model', (['inp', 'probs'], {}), '(inp, probs)\n', (579, 591), False, 'from tensorflow.keras import Model\n'), ((1337, 1351), 'numpy.array', 'np.array', (['rows'], {}), '(rows)\n', (1345, 1351), True, 'import numpy as np, random\n'), ((2733, 2763), 'generator.Generator', 'Generator', (['nwords', 'distance', 'r'], {}), '(nwords, distance, r)\n', (2742, 2763), False, 'from generator import Generator\n'), ((404, 454), 'tensorflow.keras.layers.LSTM', 'LSTM', (['hidden'], {'return_sequences': '(True)', 'stateful': '(True)'}), '(hidden, return_sequences=True, stateful=True)\n', (408, 454), False, 'from tensorflow.keras.layers import LSTM, Dense, Input\n'), ((522, 557), 'tensorflow.keras.layers.Dense', 'Dense', (['nwords'], {'activation': '"""softmax"""'}), "(nwords, activation='softmax')\n", (527, 557), False, 'from tensorflow.keras.layers import LSTM, Dense, Input\n'), ((874, 903), 'random.randint', 'random.randint', (['(0)', '(nwords - 1)'], {}), '(0, nwords - 1)\n', (888, 903), False, 'import numpy as np, random\n'), ((620, 647), 'tensorflow.keras.optimizers.Adagrad', 'Adagrad', ([], {'learning_rate': '(0.01)'}), '(learning_rate=0.01)\n', (627, 647), False, 'from tensorflow.keras.optimizers import Adagrad\n'), ((1166, 1202), 'numpy.sum', 'np.sum', (['pvec'], {'axis': '(-1)', 'keepdims': '(True)'}), '(pvec, axis=-1, keepdims=True)\n', (1172, 1202), True, 'import numpy as np, random\n'), ((1247, 1276), 'numpy.random.choice', 'np.random.choice', (['nwords'], {'p': 'p'}), 
'(nwords, p=p)\n', (1263, 1276), True, 'import numpy as np, random\n')]
|
import json
import re
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats
import eternal.card
import eternal.ewc
import eternal.plot
CARDS_DATA = eternal.card.ALL.data
def imode(src):
"""Return a copy of a series where all case-insensitive versions have been replaced with the most common case sensitive variant."""
dest = src.copy()
for lower, subset in src.groupby(src.str.lower()):
dest[subset.index] = subset.mode().values[0]
return dest
if __name__ == '__main__':
# The structure of this CSV is FarmingEternal's 7-win run breakdown (Google Sheet) exported as CSV
df_7win_decks = pd.read_csv('7win_decks_set12.csv')[['s', 'Factions', 'Contributor', 'Image', 'EWC', 'EWC-P', 'W',
'L', 'Ep. #']]
df_7win_decks['Deck'] = None
for id, row in df_7win_decks.iterrows():
deck = eternal.ewc.parse_deckbuilder_url(row['EWC-P'])
deck.main_data.name = id
deck.main_data['DeckId'] = id
deck.main_data['PowerCount'] = [card.power_count() for card in deck.main_cards]
deck.main_data['MarketAccess'] = [card.has_market_access() for card in deck.main_cards]
df_7win_decks.at[id, 'Deck'] = deck
df_7win_decks['Contributor'] = imode(df_7win_decks['Contributor'])
df_7win_decks['MainFaction'] = df_7win_decks.Deck.apply(lambda x: ''.join(x.faction()[0]))
df_7win_decks['SplashFaction'] = df_7win_decks.Deck.apply(lambda x: ''.join(x.faction()[1]))
all_cards = pd.concat(df_7win_decks.Deck.apply(lambda x: x.main_data).tolist())
all_cards['DeckMainFaction'] = all_cards.DeckId.map(df_7win_decks['MainFaction'])
all_cards['DeckSplashFaction'] = all_cards.DeckId.map(df_7win_decks['SplashFaction'])
all_cards['IsSplash'] = all_cards.apply(lambda x: bool(set(x['DeckSplashFaction']).intersection(x['Influence'])), axis=1)
all_cards['Faction'] = all_cards['Influence'].apply(eternal.card.influence_to_faction)
all_cards['Contributor'] = all_cards.DeckId.map(df_7win_decks['Contributor'])
for faction in 'FTJPS':
all_cards[faction] = all_cards['Faction'].str.contains(faction)
# Playable deck counts
card_factions = set(map(eternal.card.influence_to_faction, all_cards[all_cards['Type'] != 'Power']['Influence'].unique()))
playable_deck_count_by_faction = {}
for faction in card_factions:
if faction == 'None':
faction = ''
is_deck_playable = lambda x: set(x['MainFaction']).union(set(x['SplashFaction'])).issuperset(faction)
deck_count = df_7win_decks.apply(is_deck_playable, axis=1).sum()
playable_deck_count_by_faction[faction] = deck_count
playable_deck_count_by_faction['None'] = len(df_7win_decks)
# ********** TOP CARDS (LISTS) **************
# Figure out card count statistics
card_counts = all_cards.groupby('Name')['Faction'].apply(lambda x: pd.Series({'Faction': x[0], 'Count': x.size})).unstack(1)
card_counts['PossibleDecks'] = card_counts['Faction'].map(playable_deck_count_by_faction)
card_counts['CountPerDeck'] = card_counts['Count'] / card_counts['PossibleDecks']
card_counts = card_counts.merge(CARDS_DATA, left_index=True, right_on='Name', how='left')
card_counts['MarketAccess'] = card_counts.index.map(dict(([(x.id, x.has_market_access()) for x in eternal.card.ALL.cards])))
# Frequency normalized (pick, boosting, faction, rarity)
CURRENT_SET = 12
DRAFT_FORMAT = '12.1'
with open(f'boosting_data/{DRAFT_FORMAT}.json') as fin:
DRAFT_PACK_BOOSTING = json.load(fin)['boosting']
freq_faction_lookup = {}
for faction in card_counts['Faction'].unique():
freq_faction_lookup[faction] = (df_7win_decks['MainFaction'] + df_7win_decks['SplashFaction']).str.contains(faction).sum() / len(df_7win_decks)
freq_faction_lookup['None'] = 1.0
freq_faction = card_counts['Faction'].map(freq_faction_lookup)
# Determine base offer rates by card
currentset_rarity_counts = CARDS_DATA[CARDS_DATA['SetNumber'] == CURRENT_SET]['Rarity'].value_counts()
currentset_rarity_counts.drop('Promo', inplace=True, errors='ignore')
draft_pack_cards = CARDS_DATA.loc[DRAFT_PACK_BOOSTING.keys()].copy()
draft_pack_cards['Boosting'] = draft_pack_cards.index.map(DRAFT_PACK_BOOSTING)
draft_pack_rarity_counts = draft_pack_cards.groupby('Rarity')['Boosting'].sum()
draft_pack_rarity_counts.drop('Promo', inplace=True, errors='ignore')
draft_pack_cards['BoostedFreq'] = draft_pack_cards['Boosting'] / draft_pack_cards['Rarity'].map(draft_pack_rarity_counts)
currentset_index = card_counts['SetNumber'] == CURRENT_SET
draft_pack_index = ~currentset_index & ~(card_counts['Name'].str.endswith('Sigil'))
freq_rarity_per_pack = card_counts['Rarity'].map({'Common': 8.0, 'Uncommon': 3.0, 'Rare': 0.905, 'Legendary': 0.095})
base_offer_rate = pd.Series(index=card_counts.index, dtype='float64')
base_offer_rate[currentset_index] = 1.0 / (card_counts[currentset_index])['Rarity'].map(currentset_rarity_counts)
base_offer_rate[draft_pack_index] = draft_pack_cards['BoostedFreq'].loc[base_offer_rate[draft_pack_index].index]
offer_rate = 2 * freq_rarity_per_pack * base_offer_rate # 2 packs for each pool
card_counts['OfferRate'] = offer_rate
card_counts['CountPerOffer'] = card_counts['Count'] / (card_counts['OfferRate'])
card_counts['CountPerOfferDeck'] = (card_counts['Count'] / (card_counts['OfferRate'] * card_counts['PossibleDecks'])).astype('float')
# Analyze the top commons
CARD_COUNT_DISPLAY_COLS = ['Name', 'Rarity', 'Faction', 'PossibleDecks', 'OfferRate', 'Count', 'CountPerDeck', 'CountPerOffer', 'CountPerOfferDeck']
N = 20
RARITY = ['Common', 'Uncommon', 'Rare', 'Legendary']
# Analyze the top cards by count
top_common_cards = card_counts[card_counts['Rarity'].isin(RARITY)].sort_values('Count', ascending=False)
print("******Top {N} {Rarity} cards (by count)*****".format(N=N, Rarity='+'.join(RARITY)))
print("NOTE: OfferRates is the number of cards you would expect in a given 4-pack draft")
print("NOTE: CountPerOfferDeck also corrects for possible Decks so faction frequency is accounted for")
print(top_common_cards[CARD_COUNT_DISPLAY_COLS].head(N))
print("\n")
# Analyze the top cards by playable deck faction
top_common_cards = card_counts[card_counts['Rarity'].isin(RARITY)].sort_values('CountPerDeck', ascending=False)
print("******Top {N} {Rarity} cards (by count per deck)*****".format(N=N, Rarity='+'.join(RARITY)))
print("NOTE: OfferRates is the number of cards you would expect in a given 4-pack draft")
print("NOTE: CounterPerOffer also corrects for possible Decks so faction frequency is accounted for")
print(top_common_cards[CARD_COUNT_DISPLAY_COLS].head(N))
print("\n")
# Analyze the top cards picked cards
top_picked_cards = card_counts[card_counts['Rarity'].isin(RARITY)].sort_values('CountPerOffer', ascending=False)
print("******Top {N} {Rarity} cards (by count per offer*)*****".format(N=N, Rarity='+'.join(RARITY)))
print("NOTE: OfferRates is the number of cards you would expect in a given 4-pack draft")
print("NOTE: CountPerOfferDeck also corrects for possible Decks so faction frequency is accounted for")
print(top_picked_cards[CARD_COUNT_DISPLAY_COLS].head(N))
print("\n")
# Analyze the least picked rare cards
least_picked_cards = card_counts[card_counts['Rarity'].isin(RARITY)].sort_values('CountPerOffer', ascending=True)
print("******Bottom {N} {Rarity} cards (by count per offer*)*****".format(N=N, Rarity='+'.join(RARITY)))
print("NOTE: OfferRates is the number of cards you would expect in a given 4-pack draft")
print("NOTE: CountPerOfferDeck also corrects for possible Decks so faction frequency is accounted for")
print(least_picked_cards[CARD_COUNT_DISPLAY_COLS].head(N))
print("\n")
# Analyze the top splashed cards
N = 20
splash_cards = all_cards[all_cards['IsSplash']].copy()
n_splash_decks = len(splash_cards['DeckId'].unique())
top_splashed_cards = splash_cards[splash_cards.Type != 'Power']['Name'].value_counts()
print("******Top {N} splashed for cards****".format(N=N))
print("(out of {n_splash_decks} decks that splashed)".format(n_splash_decks=n_splash_decks))
print(top_splashed_cards.head(N))
print("\n")
# Analyz all the market cards in play
market_cards = card_counts[card_counts['MarketAccess']].sort_values('Count', ascending=False)
print("*******ALL MARKET ACCESS CARDS********")
print("NOTE: OfferRates is the number of cards you would expect in a given 4-pack draft")
print("NOTE: CountPerOfferDeck also corrects for possible Decks so faction frequency is accounted for")
print(market_cards[CARD_COUNT_DISPLAY_COLS])
print("\n")
# Top combat tricks
N = 20
print("******Top {N} Fast spells (by count)*****".format(N=N))
print(all_cards[all_cards['Type'] == 'Fast Spell']['Name'].value_counts().head(N))
print("\n")
top_fastspell_cards = card_counts[card_counts['Type'] == 'Fast Spell'].sort_values('CountPerDeck', ascending=False)
print("******Top {N} Fast spells (by count per deck)*****".format(N=N))
print(top_fastspell_cards[CARD_COUNT_DISPLAY_COLS].head(N))
print("\n")
# Top stealth units
N = 20
print("******Top {N} Stealth Units (by count)*****".format(N=N))
print(all_cards[(all_cards['Type'] == 'Unit') & (all_cards['CardText'].str.contains('<b>Stealth</b>'))]['Name'].value_counts().head(N))
print("\n")
top_stealth_cards = card_counts[(card_counts['Type'] == 'Unit') & (card_counts['CardText'].str.contains('<b>Stealth</b>'))].sort_values('CountPerDeck',
ascending=False)
print("******Top {N} Stealh Units (by count per deck)*****".format(N=N))
print(top_stealth_cards[CARD_COUNT_DISPLAY_COLS].head(N))
print("\n")
# List out all "out of faction" cards
out_of_faction_cards = pd.DataFrame(
[x for i, x in all_cards.iterrows() if (not set(x.Faction).issubset(x.DeckMainFaction + x.DeckSplashFaction)) and (x.Faction is not 'None')])
print("Out of faction most played cards")
if not out_of_faction_cards.empty:
print(out_of_faction_cards['Name'].value_counts())
print("Out of faction card Contributors")
print(df_7win_decks.loc[out_of_faction_cards['DeckId']]['Contributor'].value_counts())
else:
print("No out of faction cards played!!!")
# ********** UNIT ANALYSIS **************
# Look at the unit-counts by player
df_7win_decks['UnitCount'] = df_7win_decks.Deck.apply(lambda x: x.types())['Unit']
units_by_player = df_7win_decks.groupby('Contributor')['UnitCount'].describe()
units_by_player[units_by_player['count'] >= 3].sort_values('mean')[['count', 'mean', 'min', 'max']]
# Look at the unit-counts by player
df_7win_decks['UnitCount'] = df_7win_decks.Deck.apply(lambda x: x.types())['Unit']
units_by_faction = df_7win_decks.groupby('MainFaction')['UnitCount'].describe()
print("**** Average unit count by deck main-faction (minimum 3 decks)")
print(units_by_faction[units_by_faction['count'] >= 3].sort_values('mean')[['count', 'mean', 'min', 'max']])
# ********** DECK POWER ANALYSIS **************
# Contributor Deck Power (Type==Power)
df_7win_decks['NumPower'] = df_7win_decks.Deck.apply(lambda x: x.types())['Power']
power_by_player = df_7win_decks.groupby('Contributor')['NumPower'].describe()[['count', 'mean', 'min', 'max']]
print("**** Power played by player (card type = Power) *****")
print(power_by_player[power_by_player['count'] >= 3].sort_values('mean')[['count', 'mean', 'min', 'max']])
# Contributor Deck Power (Effective Power)
df_7win_decks['EffectivePower'] = df_7win_decks.index.map(all_cards.groupby('DeckId')['PowerCount'].sum())
powercount_by_player = df_7win_decks.groupby('Contributor')['EffectivePower'].describe()[['count', 'mean', 'min', 'max']]
print("**** Power played by player (effective power*) *****")
print("NOTE: <=2 cost or less spells counted as power e.g. Seek Power/Etchings/BluePrints etc.")
print(powercount_by_player[powercount_by_player['count'] >= 3].sort_values('mean'))
# Contributor Deck Power (Type==Power vs. Effective Power)
power_by_player_merged = pd.merge(power_by_player, powercount_by_player, left_index=True, right_index=True,
suffixes=('_type', '_effective'))
print(power_by_player_merged[power_by_player_merged['count_type'] >= 3].sort_values('mean_effective'))
# MainFaction Deck Power (Type==Power vs. Effective Power)
powercount_by_deck_main_faction = df_7win_decks.groupby('MainFaction')['EffectivePower'].describe()[['count', 'mean', 'min', 'max']]
print("**** Power played by deck main factions (effective power*) *****")
print("NOTE: <=2 cost or less spells counted as power e.g. Seek Power/Etchings/BluePrints etc.")
print(powercount_by_deck_main_faction[powercount_by_deck_main_faction['count'] >= 3].sort_values('mean'))
# Contrbutor power vs. Effective power
power_by_player_subset = power_by_player_merged[power_by_player_merged['count_type'] >= 3]
plt.figure()
plt.scatter(power_by_player_subset['mean_type'].values, power_by_player_subset['mean_effective'].values, label=power_by_player_subset.index)
for name in power_by_player_subset.index:
plt.annotate(name, (power_by_player_subset.loc[name]['mean_type'], power_by_player_subset.loc[name]['mean_effective']))
plt.grid('on')
plt.xlabel('Power (type) cards')
plt.ylabel('Effective power')
plt.show()
# Plot amount of power
MIN_DECK = 10
deck_count_by_faction = df_7win_decks['MainFaction'].value_counts()
deck_power_by_faction = df_7win_decks.groupby('MainFaction')['EffectivePower'].value_counts().sort_index()
normalized_deck_power_by_faction = pd.DataFrame()
for faction, count in deck_count_by_faction.items():
if count >= MIN_DECK:
normalized_deck_power_by_faction[faction] = deck_power_by_faction.loc[faction] / (float(count)) * 100.0
first_color = eternal.plot.get_faction_colors([x[0] for x in normalized_deck_power_by_faction.columns])
second_color = eternal.plot.get_faction_colors([x[1] for x in normalized_deck_power_by_faction.columns])
ax = normalized_deck_power_by_faction.plot(grid='on', color=first_color, linewidth=6, alpha=0.5)
normalized_deck_power_by_faction.plot(grid='on', color=second_color, linewidth=1, ax=ax)
plt.ylabel('Percentage of decks')
plt.title('Effective power by deck main faction')
# ********** CARD-COST ANALYSIS **************
# Mean card cost by deck faction
all_cards[all_cards.Type != 'Power'].groupby('DeckMainFaction')['Cost'].mean()
# all_cards[ all_cards.Type != 'Power' ].boxplot( 'Cost', 'DeckMainFaction' ) # Box plot (not that informative)
# Plot curve by deck faction
MIN_DECK = 10
curve_by_faction = all_cards[all_cards.Type != 'Power'].groupby(['DeckMainFaction', 'Cost'])['Name'].count()
deck_count_by_faction = df_7win_decks['MainFaction'].value_counts()
normalized_curve_by_faction = pd.DataFrame()
for faction, count in deck_count_by_faction.items():
if count >= MIN_DECK:
normalized_curve_by_faction[faction] = curve_by_faction.loc[faction] / (float(count))
first_color = eternal.plot.get_faction_colors([x[0] for x in normalized_curve_by_faction.columns])
second_color = eternal.plot.get_faction_colors([x[1] for x in normalized_curve_by_faction.columns])
ax = normalized_curve_by_faction.plot(grid='on', color=first_color, linewidth=6, alpha=0.5)
normalized_curve_by_faction.plot(grid='on', color=second_color, linewidth=1, ax=ax)
plt.ylabel('Number of cards')
plt.title('Average curve by deck main faction')
print("**** Average card cost by deck main faction ****")
print("(for all main-faction pairs with at least {MIN_DECK} decks)".format(MIN_DECK=MIN_DECK))
print(all_cards[all_cards.Type != 'Power'].groupby('DeckMainFaction')['Cost'].mean()[normalized_curve_by_faction.columns].sort_values())
# Augment 7win list with average unit Attack / Health
df_unit_stats = df_7win_decks.Deck.apply(lambda x: x.unit_stats())
df_7win_decks['Attack'] = df_unit_stats.Attack
df_7win_decks['Health'] = df_unit_stats.Health
# Plot the unit-health by faction
units = all_cards[all_cards.Type == 'Unit'].copy()
units['Faction'] = units.Influence.apply(eternal.card.influence_to_faction)
units_health_by_faction = units.pivot_table(index='Faction', columns=['Health'], values='Name', aggfunc='count')
sorted_units_faction_by_health = units_health_by_faction.loc[units_health_by_faction.sum(axis=1).sort_values(ascending=False).index].transpose()
colors = eternal.plot.get_faction_colors(sorted_units_faction_by_health.columns)
sorted_units_faction_by_health.plot(kind='bar', stacked=True, grid=True, color=colors, legend=True)
##******** BEST AND WORST DECKS **************
all_cards['CountPerOffer'] = all_cards.index.map(card_counts['CountPerOffer'])
all_cards['CountPerOfferDeck'] = all_cards.index.map(card_counts['CountPerOfferDeck'])
best_decks = df_7win_decks.loc[all_cards.groupby('DeckId')['CountPerOfferDeck'].mean().nlargest(10).index]
worst_decks = df_7win_decks.loc[all_cards.groupby('DeckId')['CountPerOfferDeck'].mean().nsmallest(10).index]
def plot_contributor_faction_usage():
contributor_faction_counts = all_cards[all_cards.Type != 'Power'].groupby('Contributor')[['F', 'T', 'J', 'P', 'S']].sum()
contributor_faction_percent = contributor_faction_counts.div(contributor_faction_counts.sum(axis=1), axis=0)
contributor_faction_percent['count'] = contributor_faction_percent.index.map(df_7win_decks['Contributor'].value_counts())
contributor_faction_percent['deviation'] = (contributor_faction_percent[['F', 'T', 'J', 'P', 'S']] - 0.2).abs().sum(axis=1)
divergence = lambda x: scipy.stats.entropy(x.values, [0.2, 0.2, 0.2, 0.2, 0.2])
contributor_faction_percent['divergence'] = contributor_faction_percent[['F', 'T', 'J', 'P', 'S']].apply(divergence, axis=1)
contributor_faction_percent['top_faction_percent'] = (contributor_faction_percent[['F', 'T', 'J', 'P', 'S']]).max(axis=1)
contributor_faction_percent['top_faction'] = (contributor_faction_percent[['F', 'T', 'J', 'P', 'S']]).idxmax(axis=1)
subset = contributor_faction_percent[contributor_faction_percent['count'] >= 7]
plt.figure()
plt.plot(subset['divergence'], subset['top_faction_percent'], 'ob')
for name, data in subset.iterrows():
top_faction = data['top_faction']
color = eternal.plot.get_faction_colors(top_faction)
plt.annotate(f'{name} ({top_faction})', (data['divergence'], data['top_faction_percent']), color=color[0])
plt.grid('on')
plt.xlabel('Divergence (0 = generalize, 1.0 = specialist)')
plt.ylabel('Maximum faction (%)')
plt.title('Generalist vs. specialist')
def plot_inscribe_faction_usage(FACTION):
faction_cards = all_cards[(all_cards.Type != 'Power') & (all_cards.Influence.str.contains(FACTION))].copy()
faction_cards['IsInscribe'] = faction_cards.CardText.str.contains('<b>Inscribe</b>')
faction_cards_by_deck = faction_cards.groupby('DeckId')['IsInscribe']
faction_deck_stats = pd.DataFrame({'total': faction_cards_by_deck.count(), 'inscribe': faction_cards_by_deck.sum()})
faction_deck_stats['percent'] = faction_deck_stats['inscribe'] / faction_deck_stats['total'] * 100.0
plt.figure()
ax = plt.subplot(2, 1, 1)
faction_deck_stats.boxplot(column=['percent'], by=['total'], ax=ax)
plt.ylabel('Percent (%) Inscribe')
plt.xlabel(f'Number of {FACTION} cards')
plt.title(None)
ax = plt.subplot(2, 1, 2)
faction_deck_stats['total'].value_counts().sort_index().plot(kind='bar', ax=ax)
plt.grid('on')
plt.ylabel('Number of decks')
plt.xlabel(f'Number of {FACTION} cards')
# Plot the unit-health by faction
def plot_unit_health_by_faction():
plt.figure()
for i, COST in enumerate([3, 5]):
ax = plt.subplot(2, 1, i + 1)
stealth_units = all_cards[(all_cards.Type == 'Unit') & (all_cards.CardText.str.contains('<b>Stealth</b>')) & (all_cards.Cost == COST)].copy()
stealth_units['Faction'] = stealth_units.Influence.apply(eternal.card.influence_to_faction)
stealth_units_health_by_faction = stealth_units.pivot_table(index='Faction', columns=['Health'], values='Name', aggfunc='count')
sorted_stealth_units_faction_by_health = stealth_units_health_by_faction.loc[
stealth_units_health_by_faction.sum(axis=1).sort_values(ascending=False).index].transpose()
colors = eternal.plot.get_faction_colors(sorted_stealth_units_faction_by_health.columns)
sorted_stealth_units_faction_by_health.plot(kind='bar', stacked=True, grid=True, color=colors, legend=True, ax=ax)
plt.ylabel('Count of units')
plt.title('Health of {COST}-cost *Stealth* units'.format(COST=COST))
# Plot the main popularity over time
def plot_faction_popularity(faction_type='MainFaction', n_deck_window=100):
"""
Args:
faction_type: 'MainFaction', 'SplashFaction', or 'MainFaction + SplashFaction'
:return:
"""
plt.figure()
if faction_type == 'MainFaction':
deck_factions = df_7win_decks['MainFaction'].str
elif faction_type == 'SplashFaction':
deck_factions = df_7win_decks['SplashFaction'].str
elif faction_type == 'MainFaction + SplashFaction':
deck_factions = (df_7win_decks['SplashFaction'] + df_7win_decks['SplashFaction']).str
for faction in eternal.card.FACTIONS:
color = eternal.plot.get_faction_colors(faction)
plt.plot(deck_factions.contains(faction).rolling(n_deck_window).mean() * 100.0, color=color[0], label=faction)
average_n_factions = deck_factions.len().mean()
plt.legend()
plt.title(f'Rolling {n_deck_window}-deck average of {faction_type} popularity')
plt.grid('on')
plt.ylabel('Percentage of decks')
xlim = plt.xlim()
plt.plot(plt.xlim(), [average_n_factions / 5.0 * 100] * 2, '--', color=(0.5, 0.5, 0.5))
plt.xlim(xlim)
plt.ylim(0.0, plt.ylim()[1])
# Plot the faction popularity over time (multi-faction)
def plot_multifaction_popularity():
MIN_DECK = 10
N_DECK_WINDOW = 100
deck_count_by_faction = df_7win_decks['MainFaction'].value_counts()
deck_popularity_by_faction = pd.DataFrame()
for faction, count in deck_count_by_faction.items():
if count >= MIN_DECK:
deck_popularity_by_faction[faction] = (df_7win_decks['MainFaction'] == faction).rolling(N_DECK_WINDOW).mean() * 100.0
plt.figure()
faction_order = deck_popularity_by_faction.iloc[-1].sort_values(ascending=False).index
ax = plt.subplot(2, 1, 1)
df_popularity = deck_popularity_by_faction[faction_order[:5]]
first_color = eternal.plot.get_faction_colors([x[0] for x in df_popularity.columns])
second_color = eternal.plot.get_faction_colors([x[1] for x in df_popularity.columns])
df_popularity.plot(grid='on', color=first_color, linewidth=6, alpha=0.5, ax=ax)
df_popularity.plot(grid='on', color=second_color, linewidth=1, ax=ax)
plt.ylabel('Percentage of decks')
plt.title('Deck popularity (top 1-5 popular factions today)')
ax.get_legend().remove()
ylim = plt.ylim()
ax = plt.subplot(2, 1, 2)
df_popularity = deck_popularity_by_faction[faction_order[5:]]
first_color = eternal.plot.get_faction_colors([x[0] for x in df_popularity.columns])
second_color = eternal.plot.get_faction_colors([x[1] for x in df_popularity.columns])
df_popularity.plot(grid='on', color=first_color, linewidth=6, alpha=0.5, ax=ax)
df_popularity.plot(grid='on', color=second_color, linewidth=1, ax=ax)
plt.ylabel('Percentage of decks')
plt.title('Deck popularity (top 6-10 popular factions today)')
ax.get_legend().remove()
plt.ylim(ylim)
def power_sink_summary():
""""Analyze decks using Sketches or Rune"""
cards_sketches = CARDS_DATA[CARDS_DATA['Name'].str.endswith('Sketch')]
cards_runes = CARDS_DATA[CARDS_DATA['Name'].str.startswith('Rune of')]
cards_both = pd.concat([cards_runes, cards_sketches])
power_sink_summary = []
for id, sketch in cards_both.iterrows():
n_decks = all_cards[all_cards.index.isin([id])].DeckId.unique().size
power_sink_summary.append([sketch.Name, n_decks])
power_sink_summary.append(['Any Rune', all_cards[all_cards.index.isin(cards_runes.index)].DeckId.unique().size])
power_sink_summary.append(['Any Sketch', all_cards[all_cards.index.isin(cards_sketches.index)].DeckId.unique().size])
power_sink_summary.append(['Any Rune or Sketch', all_cards[all_cards.index.isin(cards_both.index)].DeckId.unique().size])
df_power_sink_summary = pd.DataFrame(power_sink_summary, columns=['Scenario', 'NumDecks'])
df_power_sink_summary['PercentageDecks'] = df_power_sink_summary['NumDecks'] / len(df_7win_decks) * 100.0
print("**** Percentage of decks containing power sinks (Sketches and/or Runes)****")
print(df_power_sink_summary)
def display_cards_in_contention(*args,
stats=['Name', 'PossibleDecks', 'OfferRate', 'Count', 'CountPerDeck', 'CountPerOffer', 'CountPerOfferDeck']):
"""
Args:
*args: One (or more) strings to use to search the names of cards to display (case insenstive)
stats: (optional) List of columns to display
"""
re_string = '|'.join(args)
print(card_counts[card_counts['Name'].str.contains(re_string, flags=re.IGNORECASE)][stats])
display_cards_in_contention('open', 'protector')
# Dump outputs
output_order = ['Faction', 'Count', 'PossibleDecks', 'CountPerDeck', 'SetNumber', 'EternalID', 'OfferRate', 'CountPerOffer', 'CountPerOfferDeck', 'Rarity',
'Type', 'Name', 'CardText', 'Cost', 'Influence', 'Attack', 'Health', 'ImageUrl', 'DetailsUrl', 'DeckBuildable', 'UnitType', 'MarketAccess']
card_counts[output_order].to_csv('card_counts.csv')
# Additional analysis
# plot_unit_health_by_faction()
plot_faction_popularity('MainFaction', 100)
plot_faction_popularity('SplashFaction', 100)
plot_contributor_faction_usage()
|
[
"matplotlib.pyplot.title",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"pandas.DataFrame",
"pandas.merge",
"pandas.concat",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"pandas.Series",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlim",
"json.load",
"matplotlib.pyplot.annotate",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.xlabel"
] |
[((4916, 4967), 'pandas.Series', 'pd.Series', ([], {'index': 'card_counts.index', 'dtype': '"""float64"""'}), "(index=card_counts.index, dtype='float64')\n", (4925, 4967), True, 'import pandas as pd\n'), ((12576, 12696), 'pandas.merge', 'pd.merge', (['power_by_player', 'powercount_by_player'], {'left_index': '(True)', 'right_index': '(True)', 'suffixes': "('_type', '_effective')"}), "(power_by_player, powercount_by_player, left_index=True,\n right_index=True, suffixes=('_type', '_effective'))\n", (12584, 12696), True, 'import pandas as pd\n'), ((13471, 13483), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13481, 13483), True, 'import matplotlib.pyplot as plt\n'), ((13488, 13637), 'matplotlib.pyplot.scatter', 'plt.scatter', (["power_by_player_subset['mean_type'].values", "power_by_player_subset['mean_effective'].values"], {'label': 'power_by_player_subset.index'}), "(power_by_player_subset['mean_type'].values,\n power_by_player_subset['mean_effective'].values, label=\n power_by_player_subset.index)\n", (13499, 13637), True, 'import matplotlib.pyplot as plt\n'), ((13807, 13821), 'matplotlib.pyplot.grid', 'plt.grid', (['"""on"""'], {}), "('on')\n", (13815, 13821), True, 'import matplotlib.pyplot as plt\n'), ((13826, 13858), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Power (type) cards"""'], {}), "('Power (type) cards')\n", (13836, 13858), True, 'import matplotlib.pyplot as plt\n'), ((13863, 13892), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Effective power"""'], {}), "('Effective power')\n", (13873, 13892), True, 'import matplotlib.pyplot as plt\n'), ((13897, 13907), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13905, 13907), True, 'import matplotlib.pyplot as plt\n'), ((14176, 14190), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (14188, 14190), True, 'import pandas as pd\n'), ((14809, 14842), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Percentage of decks"""'], {}), "('Percentage of decks')\n", (14819, 14842), True, 
'import matplotlib.pyplot as plt\n'), ((14847, 14896), 'matplotlib.pyplot.title', 'plt.title', (['"""Effective power by deck main faction"""'], {}), "('Effective power by deck main faction')\n", (14856, 14896), True, 'import matplotlib.pyplot as plt\n'), ((15457, 15471), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (15469, 15471), True, 'import pandas as pd\n'), ((16052, 16081), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of cards"""'], {}), "('Number of cards')\n", (16062, 16081), True, 'import matplotlib.pyplot as plt\n'), ((16086, 16133), 'matplotlib.pyplot.title', 'plt.title', (['"""Average curve by deck main faction"""'], {}), "('Average curve by deck main faction')\n", (16095, 16133), True, 'import matplotlib.pyplot as plt\n'), ((642, 677), 'pandas.read_csv', 'pd.read_csv', (['"""7win_decks_set12.csv"""'], {}), "('7win_decks_set12.csv')\n", (653, 677), True, 'import pandas as pd\n'), ((13683, 13806), 'matplotlib.pyplot.annotate', 'plt.annotate', (['name', "(power_by_player_subset.loc[name]['mean_type'], power_by_player_subset.loc[\n name]['mean_effective'])"], {}), "(name, (power_by_player_subset.loc[name]['mean_type'],\n power_by_player_subset.loc[name]['mean_effective']))\n", (13695, 13806), True, 'import matplotlib.pyplot as plt\n'), ((18874, 18886), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (18884, 18886), True, 'import matplotlib.pyplot as plt\n'), ((18895, 18962), 'matplotlib.pyplot.plot', 'plt.plot', (["subset['divergence']", "subset['top_faction_percent']", '"""ob"""'], {}), "(subset['divergence'], subset['top_faction_percent'], 'ob')\n", (18903, 18962), True, 'import matplotlib.pyplot as plt\n'), ((19246, 19260), 'matplotlib.pyplot.grid', 'plt.grid', (['"""on"""'], {}), "('on')\n", (19254, 19260), True, 'import matplotlib.pyplot as plt\n'), ((19269, 19328), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Divergence (0 = generalize, 1.0 = specialist)"""'], {}), "('Divergence (0 = generalize, 1.0 = 
specialist)')\n", (19279, 19328), True, 'import matplotlib.pyplot as plt\n'), ((19337, 19370), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Maximum faction (%)"""'], {}), "('Maximum faction (%)')\n", (19347, 19370), True, 'import matplotlib.pyplot as plt\n'), ((19379, 19417), 'matplotlib.pyplot.title', 'plt.title', (['"""Generalist vs. specialist"""'], {}), "('Generalist vs. specialist')\n", (19388, 19417), True, 'import matplotlib.pyplot as plt\n'), ((19995, 20007), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (20005, 20007), True, 'import matplotlib.pyplot as plt\n'), ((20021, 20041), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (20032, 20041), True, 'import matplotlib.pyplot as plt\n'), ((20126, 20160), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Percent (%) Inscribe"""'], {}), "('Percent (%) Inscribe')\n", (20136, 20160), True, 'import matplotlib.pyplot as plt\n'), ((20169, 20209), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['f"""Number of {FACTION} cards"""'], {}), "(f'Number of {FACTION} cards')\n", (20179, 20209), True, 'import matplotlib.pyplot as plt\n'), ((20218, 20233), 'matplotlib.pyplot.title', 'plt.title', (['None'], {}), '(None)\n', (20227, 20233), True, 'import matplotlib.pyplot as plt\n'), ((20247, 20267), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (20258, 20267), True, 'import matplotlib.pyplot as plt\n'), ((20364, 20378), 'matplotlib.pyplot.grid', 'plt.grid', (['"""on"""'], {}), "('on')\n", (20372, 20378), True, 'import matplotlib.pyplot as plt\n'), ((20387, 20416), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of decks"""'], {}), "('Number of decks')\n", (20397, 20416), True, 'import matplotlib.pyplot as plt\n'), ((20425, 20465), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['f"""Number of {FACTION} cards"""'], {}), "(f'Number of {FACTION} cards')\n", (20435, 20465), True, 'import matplotlib.pyplot as plt\n'), ((20553, 20565), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (20563, 20565), True, 'import matplotlib.pyplot as plt\n'), ((21878, 21890), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (21888, 21890), True, 'import matplotlib.pyplot as plt\n'), ((22555, 22567), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (22565, 22567), True, 'import matplotlib.pyplot as plt\n'), ((22576, 22655), 'matplotlib.pyplot.title', 'plt.title', (['f"""Rolling {n_deck_window}-deck average of {faction_type} popularity"""'], {}), "(f'Rolling {n_deck_window}-deck average of {faction_type} popularity')\n", (22585, 22655), True, 'import matplotlib.pyplot as plt\n'), ((22664, 22678), 'matplotlib.pyplot.grid', 'plt.grid', (['"""on"""'], {}), "('on')\n", (22672, 22678), True, 'import matplotlib.pyplot as plt\n'), ((22687, 22720), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Percentage of decks"""'], {}), "('Percentage of decks')\n", (22697, 22720), True, 'import matplotlib.pyplot as plt\n'), ((22736, 22746), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (22744, 22746), True, 'import matplotlib.pyplot as plt\n'), ((22851, 22865), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (22859, 22865), True, 'import matplotlib.pyplot as plt\n'), ((23168, 23182), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (23180, 23182), True, 'import pandas as pd\n'), ((23421, 23433), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (23431, 23433), True, 'import matplotlib.pyplot as plt\n'), ((23542, 23562), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (23553, 23562), True, 'import matplotlib.pyplot as plt\n'), ((23994, 24027), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Percentage of decks"""'], {}), "('Percentage of decks')\n", (24004, 24027), True, 'import matplotlib.pyplot as plt\n'), ((24036, 24097), 'matplotlib.pyplot.title', 'plt.title', (['"""Deck popularity (top 1-5 popular factions 
today)"""'], {}), "('Deck popularity (top 1-5 popular factions today)')\n", (24045, 24097), True, 'import matplotlib.pyplot as plt\n'), ((24146, 24156), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (24154, 24156), True, 'import matplotlib.pyplot as plt\n'), ((24171, 24191), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (24182, 24191), True, 'import matplotlib.pyplot as plt\n'), ((24623, 24656), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Percentage of decks"""'], {}), "('Percentage of decks')\n", (24633, 24656), True, 'import matplotlib.pyplot as plt\n'), ((24665, 24727), 'matplotlib.pyplot.title', 'plt.title', (['"""Deck popularity (top 6-10 popular factions today)"""'], {}), "('Deck popularity (top 6-10 popular factions today)')\n", (24674, 24727), True, 'import matplotlib.pyplot as plt\n'), ((24769, 24783), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (24777, 24783), True, 'import matplotlib.pyplot as plt\n'), ((25047, 25087), 'pandas.concat', 'pd.concat', (['[cards_runes, cards_sketches]'], {}), '([cards_runes, cards_sketches])\n', (25056, 25087), True, 'import pandas as pd\n'), ((25724, 25790), 'pandas.DataFrame', 'pd.DataFrame', (['power_sink_summary'], {'columns': "['Scenario', 'NumDecks']"}), "(power_sink_summary, columns=['Scenario', 'NumDecks'])\n", (25736, 25790), True, 'import pandas as pd\n'), ((3590, 3604), 'json.load', 'json.load', (['fin'], {}), '(fin)\n', (3599, 3604), False, 'import json\n'), ((19131, 19242), 'matplotlib.pyplot.annotate', 'plt.annotate', (['f"""{name} ({top_faction})"""', "(data['divergence'], data['top_faction_percent'])"], {'color': 'color[0]'}), "(f'{name} ({top_faction})', (data['divergence'], data[\n 'top_faction_percent']), color=color[0])\n", (19143, 19242), True, 'import matplotlib.pyplot as plt\n'), ((20625, 20649), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(i + 1)'], {}), '(2, 1, i + 1)\n', (20636, 20649), True, 'import 
matplotlib.pyplot as plt\n'), ((21487, 21515), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count of units"""'], {}), "('Count of units')\n", (21497, 21515), True, 'import matplotlib.pyplot as plt\n'), ((22764, 22774), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (22772, 22774), True, 'import matplotlib.pyplot as plt\n'), ((22888, 22898), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (22896, 22898), True, 'import matplotlib.pyplot as plt\n'), ((2930, 2975), 'pandas.Series', 'pd.Series', (["{'Faction': x[0], 'Count': x.size}"], {}), "({'Faction': x[0], 'Count': x.size})\n", (2939, 2975), True, 'import pandas as pd\n')]
|
# Generated by Django 2.2.2 on 2019-06-20 11:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional ``revision`` text field to the ``part`` model."""
    dependencies = [
        ('part', '0010_auto_20190620_2135'),
    ]
    operations = [
        migrations.AddField(
            model_name='part',
            name='revision',
            # blank=True makes the field optional in forms/admin.
            field=models.CharField(blank=True, help_text='Part revision or version number', max_length=100),
        ),
    ]
|
[
"django.db.models.CharField"
] |
[((331, 424), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Part revision or version number"""', 'max_length': '(100)'}), "(blank=True, help_text='Part revision or version number',\n max_length=100)\n", (347, 424), False, 'from django.db import migrations, models\n')]
|
#!/usr/bin/env python3
import argparse
class Rectangle:
    """An axis-aligned rectangle at (x, y) with width w and height h."""

    def __init__(self, x, y, w, h):
        self.x = x
        self.y = y
        self.w = w
        self.h = h

    def __repr__(self):
        # Rendered as a drawing command understood by the consumer script.
        return 'DRAW_RECTANGLE {0},{1},{2},{3}'.format(self.x, self.y, self.w, self.h)

    @staticmethod
    def spawn(start, delta_x, delta_y, end_x, end_y):
        """Print drawing commands for a grid of rectangles patterned on *start*."""
        print('SET_WIDTH {0}'.format(end_x))
        print('SET_HEIGHT {0}'.format(end_y))
        for x in range(start.x, end_x, delta_x):
            for y in range(start.y, end_y, delta_y):
                print(Rectangle(x, y, start.w, start.h))
        print('RENDER output.bmp')
class Triangle:
    """A triangle defined by its three vertices (x1,y1), (x2,y2), (x3,y3)."""

    def __init__(self, x1, y1, x2, y2, x3, y3):
        self.x1 = x1
        self.y1 = y1
        self.x2 = x2
        self.y2 = y2
        self.x3 = x3
        self.y3 = y3

    def __repr__(self):
        # Bug fix: the original emitted 'DRAW_RECTANGLE' (copy-paste from
        # Rectangle), producing a malformed 6-argument rectangle command.
        return f'DRAW_TRIANGLE {self.x1},{self.y1},{self.x2},{self.y2},{self.x3},{self.y3}'
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('pattern', type=str, choices=['squared'], help='shape pattern')
parser.add_argument('--width', type=int, default=500, help='image width')
parser.add_argument('--height', type=int, default=500, help='image height')
parser.add_argument('--size', type=int, default=25, help='object size')
return parser.parse_args()
def generate_inputs(pattern, width, height, size):
    """Print drawing commands for the requested pattern to stdout."""
    if pattern == 'squared':
        # Tiles rectangles of side `size` every 2*size pixels, starting
        # offset by size/2 from the origin.
        Rectangle.spawn(Rectangle(int(size / 2), int(size / 2), size, size), 2 * size, 2 * size, width, height)
if __name__ == '__main__':
    # Script entry point: parse CLI options and emit the drawing commands.
    args = _parse_args()
    generate_inputs(args.pattern, args.width, args.height, args.size)
|
[
"argparse.ArgumentParser"
] |
[((976, 1001), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (999, 1001), False, 'import argparse\n')]
|
import json
from emotion import ProcessEmotions
# Load a previously saved Reddit listing and run emotion analysis on titles.
# NOTE(review): the file handle is never closed; a `with` block would be safer.
f = open("reddit.json",encoding = 'utf-8')
loaded = json.load(f)
# A Reddit listing nests posts under data.children.
data = loaded["data"]["children"]
text = map(lambda x: x["data"]["title"], data)
res = map(ProcessEmotions, text)
for item in res:
    if item:
        for i in item:
            # Print each detected emotion with either its validity flag or
            # the text it matched on.
            if i["valid"]:
                print(i["emotion"], i["valid"])
            else:
                print(i["emotion"], i["text"])
    else:
        print("No item")
|
[
"json.load"
] |
[((101, 113), 'json.load', 'json.load', (['f'], {}), '(f)\n', (110, 113), False, 'import json\n')]
|
"""Sensor from an SQL Query."""
from __future__ import annotations
from datetime import date
import decimal
import logging
import sqlalchemy
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm import scoped_session, sessionmaker
import voluptuous as vol
from homeassistant.components.recorder import CONF_DB_URL, DEFAULT_DB_FILE, DEFAULT_URL
from homeassistant.components.sensor import (
PLATFORM_SCHEMA as PARENT_PLATFORM_SCHEMA,
SensorEntity,
)
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_NAME, CONF_UNIT_OF_MEASUREMENT, CONF_VALUE_TEMPLATE
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.device_registry import DeviceEntryType
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.template import Template
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from .const import CONF_COLUMN_NAME, CONF_QUERIES, CONF_QUERY, DB_URL_RE, DOMAIN
# Module-level logger, namespaced to this integration.
_LOGGER = logging.getLogger(__name__)
def redact_credentials(data: str) -> str:
    """Return *data* with any ``//user:password@`` credentials masked."""
    redacted = DB_URL_RE.sub("//****:****@", data)
    return redacted
# Validation schema for one YAML-configured query entry.
_QUERY_SCHEME = vol.Schema(
    {
        vol.Required(CONF_COLUMN_NAME): cv.string,
        vol.Required(CONF_NAME): cv.string,
        vol.Required(CONF_QUERY): cv.string,
        vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
        vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
    }
)
# Legacy YAML platform schema: a list of query entries plus an optional DB URL.
PLATFORM_SCHEMA = PARENT_PLATFORM_SCHEMA.extend(
    {vol.Required(CONF_QUERIES): [_QUERY_SCHEME], vol.Optional(CONF_DB_URL): cv.string}
)
async def async_setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    async_add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up the SQL sensor platform.

    YAML configuration is deprecated; each configured query is forwarded
    into the config-entry import flow instead of being set up directly.
    """
    _LOGGER.warning(
        # SQL config flow added in 2022.4 and should be removed in 2022.6
        "Configuration of the SQL sensor platform in YAML is deprecated and "
        "will be removed in Home Assistant 2022.6; Your existing configuration "
        "has been imported into the UI automatically and can be safely removed "
        "from your configuration.yaml file"
    )
    # Fall back to the recorder's default database file when no URL is given.
    default_db_url = DEFAULT_URL.format(
        hass_config_path=hass.config.path(DEFAULT_DB_FILE)
    )
    for query in config[CONF_QUERIES]:
        # Re-shape each YAML query into the flat dict the import flow expects.
        new_config = {
            CONF_DB_URL: config.get(CONF_DB_URL, default_db_url),
            CONF_NAME: query.get(CONF_NAME),
            CONF_QUERY: query.get(CONF_QUERY),
            CONF_UNIT_OF_MEASUREMENT: query.get(CONF_UNIT_OF_MEASUREMENT),
            CONF_VALUE_TEMPLATE: query.get(CONF_VALUE_TEMPLATE),
            CONF_COLUMN_NAME: query.get(CONF_COLUMN_NAME),
        }
        hass.async_create_task(
            hass.config_entries.flow.async_init(
                DOMAIN,
                context={"source": SOURCE_IMPORT},
                data=new_config,
            )
        )
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
    """Set up the SQL sensor entry."""
    db_url: str = entry.options[CONF_DB_URL]
    name: str = entry.options[CONF_NAME]
    query_str: str = entry.options[CONF_QUERY]
    unit: str | None = entry.options.get(CONF_UNIT_OF_MEASUREMENT)
    template: str | None = entry.options.get(CONF_VALUE_TEMPLATE)
    column_name: str = entry.options[CONF_COLUMN_NAME]
    value_template: Template | None = None
    if template is not None:
        try:
            # Compile the optional value template; an invalid template is
            # dropped so setup still succeeds with raw query values.
            value_template = Template(template)
            value_template.ensure_valid()
        except TemplateError:
            value_template = None
        if value_template is not None:
            value_template.hass = hass
    try:
        engine = sqlalchemy.create_engine(db_url)
        sessmaker = scoped_session(sessionmaker(bind=engine))
    except SQLAlchemyError as err:
        # The exception text may contain credentials, so redact before logging.
        _LOGGER.error("Can not open database %s", {redact_credentials(str(err))})
        return
    # MSSQL uses TOP and not LIMIT
    if not ("LIMIT" in query_str.upper() or "SELECT TOP" in query_str.upper()):
        # Constrain the query to a single row if the user did not already.
        query_str = (
            query_str.replace("SELECT", "SELECT TOP 1")
            if "mssql" in db_url
            else query_str.replace(";", " LIMIT 1;")
        )
    async_add_entities(
        [
            SQLSensor(
                name,
                sessmaker,
                query_str,
                column_name,
                unit,
                value_template,
                entry.entry_id,
            )
        ],
        True,
    )
class SQLSensor(SensorEntity):
    """Representation of an SQL sensor."""
    _attr_icon = "mdi:database-search"
    def __init__(
        self,
        name: str,
        sessmaker: scoped_session,
        query: str,
        column: str,
        unit: str | None,
        value_template: Template | None,
        entry_id: str,
    ) -> None:
        """Initialize the SQL sensor."""
        self._attr_name = name
        self._query = query
        self._attr_native_unit_of_measurement = unit
        self._template = value_template
        self._column_name = column
        self.sessionmaker = sessmaker
        self._attr_extra_state_attributes = {}
        self._attr_unique_id = entry_id
        # One logical "SQL" device per config entry.
        self._attr_device_info = DeviceInfo(
            entry_type=DeviceEntryType.SERVICE,
            identifiers={(DOMAIN, entry_id)},
            manufacturer="SQL",
            name=name,
        )
    def update(self) -> None:
        """Retrieve sensor data from the query."""
        data = None
        self._attr_extra_state_attributes = {}
        sess: scoped_session = self.sessionmaker()
        try:
            result = sess.execute(self._query)
        except SQLAlchemyError as err:
            # Redact credentials that may leak via the driver's error text.
            _LOGGER.error(
                "Error executing query %s: %s",
                self._query,
                redact_credentials(str(err)),
            )
            return
        _LOGGER.debug("Result %s, ResultMapping %s", result, result.mappings())
        for res in result.mappings():
            _LOGGER.debug("result = %s", res.items())
            data = res[self._column_name]
            for key, value in res.items():
                # Coerce DB-specific types into state-friendly primitives.
                if isinstance(value, decimal.Decimal):
                    value = float(value)
                if isinstance(value, date):
                    value = value.isoformat()
                self._attr_extra_state_attributes[key] = value
        if data is not None and self._template is not None:
            # Render the configured template against the raw column value.
            self._attr_native_value = (
                self._template.async_render_with_possible_json_value(data, None)
            )
        else:
            self._attr_native_value = data
        if data is None:
            _LOGGER.warning("%s returned no results", self._query)
        sess.close()
|
[
"voluptuous.Optional",
"homeassistant.helpers.entity.DeviceInfo",
"voluptuous.Required",
"sqlalchemy.create_engine",
"sqlalchemy.orm.sessionmaker",
"logging.getLogger",
"homeassistant.helpers.template.Template"
] |
[((1179, 1206), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1196, 1206), False, 'import logging\n'), ((1389, 1419), 'voluptuous.Required', 'vol.Required', (['CONF_COLUMN_NAME'], {}), '(CONF_COLUMN_NAME)\n', (1401, 1419), True, 'import voluptuous as vol\n'), ((1440, 1463), 'voluptuous.Required', 'vol.Required', (['CONF_NAME'], {}), '(CONF_NAME)\n', (1452, 1463), True, 'import voluptuous as vol\n'), ((1484, 1508), 'voluptuous.Required', 'vol.Required', (['CONF_QUERY'], {}), '(CONF_QUERY)\n', (1496, 1508), True, 'import voluptuous as vol\n'), ((1529, 1567), 'voluptuous.Optional', 'vol.Optional', (['CONF_UNIT_OF_MEASUREMENT'], {}), '(CONF_UNIT_OF_MEASUREMENT)\n', (1541, 1567), True, 'import voluptuous as vol\n'), ((1588, 1621), 'voluptuous.Optional', 'vol.Optional', (['CONF_VALUE_TEMPLATE'], {}), '(CONF_VALUE_TEMPLATE)\n', (1600, 1621), True, 'import voluptuous as vol\n'), ((1699, 1725), 'voluptuous.Required', 'vol.Required', (['CONF_QUERIES'], {}), '(CONF_QUERIES)\n', (1711, 1725), True, 'import voluptuous as vol\n'), ((1744, 1769), 'voluptuous.Optional', 'vol.Optional', (['CONF_DB_URL'], {}), '(CONF_DB_URL)\n', (1756, 1769), True, 'import voluptuous as vol\n'), ((3986, 4018), 'sqlalchemy.create_engine', 'sqlalchemy.create_engine', (['db_url'], {}), '(db_url)\n', (4010, 4018), False, 'import sqlalchemy\n'), ((5532, 5647), 'homeassistant.helpers.entity.DeviceInfo', 'DeviceInfo', ([], {'entry_type': 'DeviceEntryType.SERVICE', 'identifiers': '{(DOMAIN, entry_id)}', 'manufacturer': '"""SQL"""', 'name': 'name'}), "(entry_type=DeviceEntryType.SERVICE, identifiers={(DOMAIN,\n entry_id)}, manufacturer='SQL', name=name)\n", (5542, 5647), False, 'from homeassistant.helpers.entity import DeviceInfo\n'), ((3756, 3774), 'homeassistant.helpers.template.Template', 'Template', (['template'], {}), '(template)\n', (3764, 3774), False, 'from homeassistant.helpers.template import Template\n'), ((4054, 4079), 'sqlalchemy.orm.sessionmaker', 
'sessionmaker', ([], {'bind': 'engine'}), '(bind=engine)\n', (4066, 4079), False, 'from sqlalchemy.orm import scoped_session, sessionmaker\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
from scipy.ndimage import gaussian_filter, median_filter
from matplotlib.animation import FuncAnimation
from perlin_noise import PerlinNoise
##########################################################################################
# FUNCTIONS FOR RENDERING I.E. GO FROM GEOMETRY TO FINAL IMAGES
##########################################################################################
# function: z_disk_props
# function: sarc_list_in_slice_fcn
# function: return_x_y_z_mat
# function: point_in_cyl
# function: binary_box
# function: slice_to_matrix
# function: matrix_gaussian_blur_fcn
# function: matrix_median_blur_fcn
# function: random_val
# function: cloud_image
# function: matrix_to_image
# function: add_perlin_noise
# function: save_img_stil
# function: still_to_avi
# function: ground_truth_movie
##########################################################################################
##########################################################################################
def z_disk_props(sarc_list, is_normal_radius, is_normal_height, avg_radius, avg_height, parameter_radius, parameter_height):
    """Sample per-sarcomere z-disk cylinder radii and heights.

    Each z disk is modeled as a cylinder; its radius/height is drawn either
    from a normal distribution (mean avg_*, std parameter_*) or uniformly
    from the interval avg_* +/- parameter_*. One draw pair per sarcomere.
    """
    def _sample(use_normal, mean, spread):
        # One random sample around `mean`, normal or uniform.
        if use_normal:
            return mean + np.random.normal(0, spread)
        return mean + (np.random.random(1)[0] - 0.5) * spread * 2.0

    radius_list = []
    height_list = []
    for _ in sarc_list:
        # Radius is drawn before height, matching the original draw order.
        radius_list.append(_sample(is_normal_radius, avg_radius, parameter_radius))
        height_list.append(_sample(is_normal_height, avg_height, parameter_height))
    return radius_list, height_list
##########################################################################################
def sarc_list_in_slice_fcn(sarc_list, radius_list, height_list, z_lower, z_upper):
    """Keep only sarcomeres whose midpoint z lies strictly inside (z_lower, z_upper).

    Returns the surviving sarcomeres together with their radii and heights.
    """
    kept_sarcs = []
    kept_radii = []
    kept_heights = []
    for sarc, rad, hei in zip(sarc_list, radius_list, height_list):
        # Midpoint of the two z-disk endpoints along the z axis.
        z_mid = 0.5 * (sarc[0][2] + sarc[1][2])
        if z_lower < z_mid < z_upper:
            kept_sarcs.append(sarc)
            kept_radii.append(rad)
            kept_heights.append(hei)
    return kept_sarcs, kept_radii, kept_heights
##########################################################################################
def return_x_y_z_mat(matrix, x_lower, x_upper, y_lower, y_upper, z_lower, z_upper):
    """Return X, Y and Z coordinate grids for a voxel matrix.

    Voxel index i along an axis of size n maps to the physical coordinate
    i / n * (upper - lower) + lower. Vectorized with numpy broadcasting
    (replaces an accidental O(nx*ny*nz) Python triple loop); the returned
    values are identical to the original element-wise computation.
    """
    num_x, num_y, num_z = matrix.shape
    xs = np.arange(num_x) / num_x * (x_upper - x_lower) + x_lower
    ys = np.arange(num_y) / num_y * (y_upper - y_lower) + y_lower
    zs = np.arange(num_z) / num_z * (z_upper - z_lower) + z_lower
    # indexing='ij' keeps axis order (x, y, z) like the original loops.
    matrix_X, matrix_Y, matrix_Z = np.meshgrid(xs, ys, zs, indexing='ij')
    return matrix_X, matrix_Y, matrix_Z
##########################################################################################
def point_in_cyl(pt_x, pt_y, pt_z, cyl_p1, cyl_p2, cyl_rad):
    """Return 1 when the point lies inside the finite cylinder, else 0.

    The cylinder spans cyl_p1 -> cyl_p2 with radius cyl_rad.
    """
    query = np.asarray([pt_x, pt_y, pt_z])
    end_a = np.asarray([cyl_p1[0], cyl_p1[1], cyl_p1[2]])
    end_b = np.asarray([cyl_p2[0], cyl_p2[1], cyl_p2[2]])
    axis = end_b - end_a
    # Reject points beyond either end cap.
    if np.dot(query - end_a, axis) < 0 or np.dot(query - end_b, axis) > 0:
        return 0
    # Perpendicular distance from the cylinder axis.
    dist = np.linalg.norm(np.cross(query - end_a, axis)) / np.linalg.norm(axis)
    return 1 if dist <= cyl_rad else 0
##########################################################################################
def binary_box(matrix_X, matrix_Y, matrix_Z, cyl_p1, cyl_p2, cyl_rad):
    """Return a 0/1 mask flagging which grid points lie inside the cylinder."""
    shape = (matrix_X.shape[0], matrix_Y.shape[1], matrix_Z.shape[2])
    bin_box = np.zeros(shape)
    # Walk every voxel index triple and test its coordinates.
    for ii, jj, kk in np.ndindex(shape):
        bin_box[ii, jj, kk] = point_in_cyl(
            matrix_X[ii, jj, kk], matrix_Y[ii, jj, kk], matrix_Z[ii, jj, kk],
            cyl_p1, cyl_p2, cyl_rad)
    return bin_box
##########################################################################################
def slice_to_matrix(sarc_list,dim_x,dim_y,dim_z,x_lower,x_upper,y_lower,y_upper,z_lower,z_upper, mean_rad, mean_hei, bound_x, bound_y, bound_z, val):
	"""Create a 3D matrix where each sarcomere is represented as voxels.

	Each sarcomere contributes one z-disk cylinder at its s1 end; the far
	(s2) disk of the final sarcomere is rasterized as well so the chain is
	capped at both ends.
	"""
	matrix = np.zeros((dim_x,dim_y,dim_z))
	matrix_X, matrix_Y, matrix_Z = return_x_y_z_mat(matrix, x_lower, x_upper, y_lower, y_upper, z_lower, z_upper)
	# for each, only add s1 (adding s2 would be redundant)
	num_sarc = len(sarc_list)
	for kk in range(0,num_sarc):
		s1 = sarc_list[kk][0]
		s1 = np.asarray([s1[0],s1[1],s1[2]])
		s2 = sarc_list[kk][1]
		s2 = np.asarray([s2[0],s2[1],s2[2]])
		# Unit vector along the sarcomere; the disk is a cylinder of height
		# hei centered on s1, oriented along this axis.
		vec = (s2 - s1) / np.linalg.norm(s2-s1)
		rad = mean_rad[kk]
		hei = mean_hei[kk]
		p1 = s1 + vec * hei/2.0
		p2 = s1 - vec * hei/2.0
		# Voxel index of the disk center, then a clipped bounding box so only
		# a small sub-volume is rasterized per disk.
		cent_x = int((s1[0] - x_lower)/(x_upper-x_lower) * dim_x)
		cent_y = int((s1[1] - y_lower)/(y_upper-y_lower) * dim_y)
		cent_z = int((s1[2] - z_lower)/(z_upper-z_lower) * dim_z)
		lower_x = np.max([cent_x - bound_x, 0])
		upper_x = np.min([cent_x + bound_x, dim_x-1])
		lower_y = np.max([cent_y - bound_y, 0])
		upper_y = np.min([cent_y + bound_y, dim_y-1])
		lower_z = np.max([cent_z - bound_z, 0])
		upper_z = np.min([cent_z + bound_z, dim_z-1])
		mm_x = matrix_X[lower_x:upper_x,lower_y:upper_y,lower_z:upper_z]
		mm_y = matrix_Y[lower_x:upper_x,lower_y:upper_y,lower_z:upper_z]
		mm_z = matrix_Z[lower_x:upper_x,lower_y:upper_y,lower_z:upper_z]
		bin_box = binary_box(mm_x,mm_y,mm_z,p1,p2,rad)
		matrix[lower_x:upper_x,lower_y:upper_y,lower_z:upper_z] += bin_box*val
		if kk == num_sarc - 1:
			# Last sarcomere: also rasterize the disk at the s2 end so the
			# chain does not terminate without a final z disk.
			s1 = sarc_list[kk][0]
			s1 = np.asarray([s1[0],s1[1],s1[2]])
			s2 = sarc_list[kk][1]
			s2 = np.asarray([s2[0],s2[1],s2[2]])
			vec = (s2 - s1) / np.linalg.norm(s2-s1)
			rad = mean_rad[kk]
			hei = mean_hei[kk]
			p1 = s2 + vec * hei/2.0
			p2 = s2 - vec * hei/2.0
			# NOTE(review): this bounding box is still centered on s1, not s2;
			# if s2 falls outside it the terminal disk is clipped. TODO confirm.
			cent_x = int((s1[0] - x_lower)/(x_upper-x_lower) * dim_x)
			cent_y = int((s1[1] - y_lower)/(y_upper-y_lower) * dim_y)
			cent_z = int((s1[2] - z_lower)/(z_upper-z_lower) * dim_z)
			lower_x = np.max([cent_x - bound_x, 0])
			upper_x = np.min([cent_x + bound_x, dim_x-1])
			lower_y = np.max([cent_y - bound_y, 0])
			upper_y = np.min([cent_y + bound_y, dim_y-1])
			lower_z = np.max([cent_z - bound_z, 0])
			upper_z = np.min([cent_z + bound_z, dim_z-1])
			mm_x = matrix_X[lower_x:upper_x,lower_y:upper_y,lower_z:upper_z]
			mm_y = matrix_Y[lower_x:upper_x,lower_y:upper_y,lower_z:upper_z]
			mm_z = matrix_Z[lower_x:upper_x,lower_y:upper_y,lower_z:upper_z]
			bin_box = binary_box(mm_x,mm_y,mm_z,p1,p2,rad)
			matrix[lower_x:upper_x,lower_y:upper_y,lower_z:upper_z] += bin_box*val
	return matrix
##########################################################################################
def matrix_gaussian_blur_fcn(matrix, sig):
    """Return a Gaussian-blurred copy of the voxelized sarcomere matrix."""
    return gaussian_filter(matrix, sigma=sig)
##########################################################################################
def matrix_median_blur_fcn(matrix, size):
    """Apply a median blur to the voxelized sarcomere matrix.

    Bug fix: the original filtered the not-yet-defined output variable
    ``matrix_blur`` instead of the input ``matrix``, raising a NameError
    on every call.
    """
    matrix_blur = median_filter(matrix, size=size)
    return matrix_blur
##########################################################################################
def random_val(matrix, mean, std):
    """Add i.i.d. Gaussian noise (mean, std) to the matrix, in place.

    Note: the input array itself is modified and then returned.
    """
    noise = np.random.normal(mean, std, matrix.shape)
    matrix += noise
    return matrix
##########################################################################################
def cloud_image(a, b, x0, y0, matrix, val):
    """Add an elliptical "cloud" of intensity to every z-slice of the matrix.

    Voxels whose (row, col) index falls inside the ellipse centered at
    (x0, y0) with semi-axes (a, b) have ``val * 10`` added, in place.
    Vectorized replacement of the original O(nx*ny*nz) triple loop with
    identical results; also adds the missing docstring.
    """
    rows = np.arange(matrix.shape[0]).reshape(-1, 1)
    cols = np.arange(matrix.shape[1]).reshape(1, -1)
    # 2D ellipse membership mask, broadcast over all z slices below.
    inside = ((rows - x0) / a) ** 2.0 + ((cols - y0) / b) ** 2.0 < 1
    matrix[inside] += val * 10
    return matrix
##########################################################################################
def matrix_to_image(matrix, slice_lower, slice_upper):
    """Project a 3D stack onto 2D by summing z-slices slice_lower:slice_upper."""
    stack = matrix[:, :, slice_lower:slice_upper]
    return stack.sum(axis=2)
##########################################################################################
def add_perlin_noise(image,octaves,mag_ratio):
	"""Add Perlin noise to the image.

	The noise field is rescaled to 0-1 and blended in at a magnitude of
	mag_ratio times the brightest pixel. The seed is fixed, so the noise
	pattern is identical across calls.
	"""
	noise = PerlinNoise(octaves,seed=777)
	pix0 = image.shape[0]; pix1 = image.shape[1]
	# NOTE(review): `j` ranges over pix0 while `i` ranges over pix1, yet the
	# divisors are pix0/pix1 respectively -- for non-square images the axes
	# look swapped. TODO confirm against callers.
	pic = [[noise([i/pix0, j/pix1]) for j in range(pix0)] for i in range(pix1)]
	# make perlin noise from range 0-1
	pic = (pic - np.min(pic)) / (np.max(pic) - np.min(pic))
	max_image = np.max(image)
	image_with_noise = image + pic * max_image * mag_ratio
	return image_with_noise
##########################################################################################
def save_img_stills(image_list,folder_name):
	"""Save image stills with correct matplotlib settings.

	Writes each frame to <folder_name>/render/frame_XXX.png with the axes
	and padding stripped so only the rendered image is saved.
	"""
	folder_name_render = folder_name + '/render'
	if not os.path.exists(folder_name_render):
		os.makedirs(folder_name_render)
	num_images = len(image_list)
	for step in range(0,num_images):
		image = image_list[step]
		plt.figure()
		plt.imshow(image)
		plt.axis('off')
		ax = plt.gca()
		ax.set_xticks([]); ax.set_yticks([])
		# Zero-pad the frame index so filenames sort lexicographically.
		if step < 10:
			plt.savefig(folder_name_render + '/frame_00%i.png'%(step),bbox_inches = 'tight',transparent=True,pad_inches = 0)
		elif step < 100:
			plt.savefig(folder_name_render + '/frame_0%i.png'%(step),bbox_inches = 'tight',transparent=True,pad_inches = 0)
		else:
			plt.savefig(folder_name_render + '/frame_%i.png'%(step),bbox_inches = 'tight',transparent=True,pad_inches = 0)
		plt.close()
	return
##########################################################################################
def still_to_avi(folder_name,num_frames,is_GT):
	"""Convert still images to an avi.

	Reads frame_XXX.png files (from the ground-truth folder when is_GT is
	True, otherwise from the render folder) and writes a 30 fps AVI.
	"""
	folder_name_render = folder_name + '/render'
	if is_GT == True:
		video_name = folder_name + '/ground_truth_movie/GT_' + folder_name + '.avi'
	else:
		video_name = folder_name + '/' + folder_name + '.avi'
	img_list = []
	for kk in range(0,num_frames):
		# Frame indices were zero-padded to three digits when saved.
		if kk < 10:
			fname = 'frame_00%i.png'%(kk)
		elif kk < 100:
			fname = 'frame_0%i.png'%(kk)
		else:
			fname = 'frame_%i.png'%(kk)
		img_list.append(fname)
	images = [img for img in img_list]
	# The first frame determines the video dimensions.
	if is_GT == True:
		frame = cv2.imread(os.path.join(folder_name + '/ground_truth_movie', images[0]))
	else:
		frame = cv2.imread(os.path.join(folder_name + '/render', images[0]))
	height, width, layers = frame.shape
	video = cv2.VideoWriter(video_name, 0, 30, (width,height))
	for image in images:
		if is_GT == True:
			video.write(cv2.imread(os.path.join(folder_name + '/ground_truth_movie', image)))
		else:
			video.write(cv2.imread(os.path.join(folder_name + '/render', image)))
	cv2.destroyAllWindows()
	video.release()
	return
##########################################################################################
def ground_truth_movie(folder_name,num_frames,img_list,sarc_array_normalized, x_pos_array, y_pos_array,x_lower,x_upper,y_lower,y_upper,dim_x,dim_y):
	"""Make the ground truth movie from the geometry.

	Overlays each rendered frame with one dot per sarcomere, colored by its
	normalized value (pure red at <= -0.2, pure blue at >= 0.2, blended in
	between), and saves the frames to <folder_name>/ground_truth_movie.
	"""
	folder_name_GT = folder_name + '/ground_truth_movie'
	if not os.path.exists(folder_name_GT):
		os.makedirs(folder_name_GT)
	all_normalized = sarc_array_normalized
	# Map normalized values to a 0-1 color weight, clamped at +/- 0.2.
	color_matrix = np.zeros(all_normalized.shape)
	for kk in range(0,all_normalized.shape[0]):
		for jj in range(0,all_normalized.shape[1]):
			of = all_normalized[kk,jj]
			if of < -.2:
				color_matrix[kk,jj] = 0
			elif of > .2:
				color_matrix[kk,jj] = 1
			else:
				color_matrix[kk,jj] = of*2.5 + .5
	for t in range(0,num_frames):
		img = img_list[t]
		plt.figure()
		plt.imshow(img)
		for kk in range(0,all_normalized.shape[0]):
			# Interpolate red -> blue using the precomputed color weight.
			col = (1 - color_matrix[kk,t], 0, color_matrix[kk,t])
			# Convert physical x/y positions into pixel coordinates.
			yy = (y_pos_array[kk,t] - y_lower)/(y_upper-y_lower)*dim_y
			xx = (x_pos_array[kk,t] - x_lower)/(x_upper-x_lower)*dim_x
			plt.plot(yy,xx,'.',c=col)
		ax = plt.gca()
		ax.set_xticks([]); ax.set_yticks([])
		plt.axis('off')
		if t < 10:
			plt.savefig(folder_name_GT + '/frame_00%i.png'%(t),bbox_inches = 'tight',transparent=True,pad_inches = 0)
		elif t < 100:
			plt.savefig(folder_name_GT + '/frame_0%i.png'%(t),bbox_inches = 'tight',transparent=True,pad_inches = 0)
		else:
			plt.savefig(folder_name_GT + '/frame_%i.png'%(t),bbox_inches = 'tight',transparent=True,pad_inches = 0)
		plt.close()
	return
|
[
"perlin_noise.PerlinNoise",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.linalg.norm",
"numpy.random.normal",
"matplotlib.pyplot.gca",
"cv2.VideoWriter",
"scipy.ndimage.median_filter",
"os.path.join",
"scipy.ndimage.gaussian_filter",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.close",
"os.path.exists",
"numpy.max",
"cv2.destroyAllWindows",
"numpy.asarray",
"numpy.cross",
"numpy.min",
"numpy.dot",
"os.makedirs",
"matplotlib.pyplot.plot",
"numpy.zeros",
"matplotlib.pyplot.axis",
"numpy.random.random",
"matplotlib.pyplot.savefig"
] |
[((2777, 2799), 'numpy.zeros', 'np.zeros', (['matrix.shape'], {}), '(matrix.shape)\n', (2785, 2799), True, 'import numpy as np\n'), ((2812, 2834), 'numpy.zeros', 'np.zeros', (['matrix.shape'], {}), '(matrix.shape)\n', (2820, 2834), True, 'import numpy as np\n'), ((2847, 2869), 'numpy.zeros', 'np.zeros', (['matrix.shape'], {}), '(matrix.shape)\n', (2855, 2869), True, 'import numpy as np\n'), ((3512, 3542), 'numpy.asarray', 'np.asarray', (['[pt_x, pt_y, pt_z]'], {}), '([pt_x, pt_y, pt_z])\n', (3522, 3542), True, 'import numpy as np\n'), ((3547, 3592), 'numpy.asarray', 'np.asarray', (['[cyl_p1[0], cyl_p1[1], cyl_p1[2]]'], {}), '([cyl_p1[0], cyl_p1[1], cyl_p1[2]])\n', (3557, 3592), True, 'import numpy as np\n'), ((3597, 3642), 'numpy.asarray', 'np.asarray', (['[cyl_p2[0], cyl_p2[1], cyl_p2[2]]'], {}), '([cyl_p2[0], cyl_p2[1], cyl_p2[2]])\n', (3607, 3642), True, 'import numpy as np\n'), ((3652, 3675), 'numpy.dot', 'np.dot', (['(q - p1)', '(p2 - p1)'], {}), '(q - p1, p2 - p1)\n', (3658, 3675), True, 'import numpy as np\n'), ((3682, 3705), 'numpy.dot', 'np.dot', (['(q - p2)', '(p2 - p1)'], {}), '(q - p2, p2 - p1)\n', (3688, 3705), True, 'import numpy as np\n'), ((4221, 4252), 'numpy.zeros', 'np.zeros', (['(num_x, num_y, num_z)'], {}), '((num_x, num_y, num_z))\n', (4229, 4252), True, 'import numpy as np\n'), ((4829, 4860), 'numpy.zeros', 'np.zeros', (['(dim_x, dim_y, dim_z)'], {}), '((dim_x, dim_y, dim_z))\n', (4837, 4860), True, 'import numpy as np\n'), ((7496, 7530), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['matrix'], {'sigma': 'sig'}), '(matrix, sigma=sig)\n', (7511, 7530), False, 'from scipy.ndimage import gaussian_filter, median_filter\n'), ((7789, 7826), 'scipy.ndimage.median_filter', 'median_filter', (['matrix_blur'], {'size': 'size'}), '(matrix_blur, size=size)\n', (7802, 7826), False, 'from scipy.ndimage import gaussian_filter, median_filter\n'), ((8091, 8132), 'numpy.random.normal', 'np.random.normal', (['mean', 'std', 'matrix.shape'], {}), '(mean, 
std, matrix.shape)\n', (8107, 8132), True, 'import numpy as np\n'), ((8774, 8796), 'numpy.sum', 'np.sum', (['matrix'], {'axis': '(2)'}), '(matrix, axis=2)\n', (8780, 8796), True, 'import numpy as np\n'), ((8996, 9026), 'perlin_noise.PerlinNoise', 'PerlinNoise', (['octaves'], {'seed': '(777)'}), '(octaves, seed=777)\n', (9007, 9026), False, 'from perlin_noise import PerlinNoise\n'), ((9255, 9268), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (9261, 9268), True, 'import numpy as np\n'), ((11149, 11200), 'cv2.VideoWriter', 'cv2.VideoWriter', (['video_name', '(0)', '(30)', '(width, height)'], {}), '(video_name, 0, 30, (width, height))\n', (11164, 11200), False, 'import cv2\n'), ((11412, 11435), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (11433, 11435), False, 'import cv2\n'), ((11937, 11967), 'numpy.zeros', 'np.zeros', (['all_normalized.shape'], {}), '(all_normalized.shape)\n', (11945, 11967), True, 'import numpy as np\n'), ((5114, 5147), 'numpy.asarray', 'np.asarray', (['[s1[0], s1[1], s1[2]]'], {}), '([s1[0], s1[1], s1[2]])\n', (5124, 5147), True, 'import numpy as np\n'), ((5177, 5210), 'numpy.asarray', 'np.asarray', (['[s2[0], s2[1], s2[2]]'], {}), '([s2[0], s2[1], s2[2]])\n', (5187, 5210), True, 'import numpy as np\n'), ((5546, 5575), 'numpy.max', 'np.max', (['[cent_x - bound_x, 0]'], {}), '([cent_x - bound_x, 0])\n', (5552, 5575), True, 'import numpy as np\n'), ((5588, 5625), 'numpy.min', 'np.min', (['[cent_x + bound_x, dim_x - 1]'], {}), '([cent_x + bound_x, dim_x - 1])\n', (5594, 5625), True, 'import numpy as np\n'), ((5636, 5665), 'numpy.max', 'np.max', (['[cent_y - bound_y, 0]'], {}), '([cent_y - bound_y, 0])\n', (5642, 5665), True, 'import numpy as np\n'), ((5678, 5715), 'numpy.min', 'np.min', (['[cent_y + bound_y, dim_y - 1]'], {}), '([cent_y + bound_y, dim_y - 1])\n', (5684, 5715), True, 'import numpy as np\n'), ((5726, 5755), 'numpy.max', 'np.max', (['[cent_z - bound_z, 0]'], {}), '([cent_z - bound_z, 0])\n', (5732, 5755), 
True, 'import numpy as np\n'), ((5768, 5805), 'numpy.min', 'np.min', (['[cent_z + bound_z, dim_z - 1]'], {}), '([cent_z + bound_z, dim_z - 1])\n', (5774, 5805), True, 'import numpy as np\n'), ((9602, 9636), 'os.path.exists', 'os.path.exists', (['folder_name_render'], {}), '(folder_name_render)\n', (9616, 9636), False, 'import os\n'), ((9640, 9671), 'os.makedirs', 'os.makedirs', (['folder_name_render'], {}), '(folder_name_render)\n', (9651, 9671), False, 'import os\n'), ((9765, 9777), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9775, 9777), True, 'import matplotlib.pyplot as plt\n'), ((9780, 9797), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (9790, 9797), True, 'import matplotlib.pyplot as plt\n'), ((9800, 9815), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (9808, 9815), True, 'import matplotlib.pyplot as plt\n'), ((9823, 9832), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9830, 9832), True, 'import matplotlib.pyplot as plt\n'), ((10262, 10273), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10271, 10273), True, 'import matplotlib.pyplot as plt\n'), ((11819, 11849), 'os.path.exists', 'os.path.exists', (['folder_name_GT'], {}), '(folder_name_GT)\n', (11833, 11849), False, 'import os\n'), ((11853, 11880), 'os.makedirs', 'os.makedirs', (['folder_name_GT'], {}), '(folder_name_GT)\n', (11864, 11880), False, 'import os\n'), ((12283, 12295), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12293, 12295), True, 'import matplotlib.pyplot as plt\n'), ((12298, 12313), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (12308, 12313), True, 'import matplotlib.pyplot as plt\n'), ((12581, 12590), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (12588, 12590), True, 'import matplotlib.pyplot as plt\n'), ((12632, 12647), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (12640, 12647), True, 'import matplotlib.pyplot as plt\n'), 
((13011, 13022), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13020, 13022), True, 'import matplotlib.pyplot as plt\n'), ((3785, 3808), 'numpy.linalg.norm', 'np.linalg.norm', (['(p2 - p1)'], {}), '(p2 - p1)\n', (3799, 3808), True, 'import numpy as np\n'), ((5229, 5252), 'numpy.linalg.norm', 'np.linalg.norm', (['(s2 - s1)'], {}), '(s2 - s1)\n', (5243, 5252), True, 'import numpy as np\n'), ((6196, 6229), 'numpy.asarray', 'np.asarray', (['[s1[0], s1[1], s1[2]]'], {}), '([s1[0], s1[1], s1[2]])\n', (6206, 6229), True, 'import numpy as np\n'), ((6261, 6294), 'numpy.asarray', 'np.asarray', (['[s2[0], s2[1], s2[2]]'], {}), '([s2[0], s2[1], s2[2]])\n', (6271, 6294), True, 'import numpy as np\n'), ((6639, 6668), 'numpy.max', 'np.max', (['[cent_x - bound_x, 0]'], {}), '([cent_x - bound_x, 0])\n', (6645, 6668), True, 'import numpy as np\n'), ((6682, 6719), 'numpy.min', 'np.min', (['[cent_x + bound_x, dim_x - 1]'], {}), '([cent_x + bound_x, dim_x - 1])\n', (6688, 6719), True, 'import numpy as np\n'), ((6731, 6760), 'numpy.max', 'np.max', (['[cent_y - bound_y, 0]'], {}), '([cent_y - bound_y, 0])\n', (6737, 6760), True, 'import numpy as np\n'), ((6774, 6811), 'numpy.min', 'np.min', (['[cent_y + bound_y, dim_y - 1]'], {}), '([cent_y + bound_y, dim_y - 1])\n', (6780, 6811), True, 'import numpy as np\n'), ((6823, 6852), 'numpy.max', 'np.max', (['[cent_z - bound_z, 0]'], {}), '([cent_z - bound_z, 0])\n', (6829, 6852), True, 'import numpy as np\n'), ((6866, 6903), 'numpy.min', 'np.min', (['[cent_z + bound_z, dim_z - 1]'], {}), '([cent_z + bound_z, dim_z - 1])\n', (6872, 6903), True, 'import numpy as np\n'), ((9199, 9210), 'numpy.min', 'np.min', (['pic'], {}), '(pic)\n', (9205, 9210), True, 'import numpy as np\n'), ((9215, 9226), 'numpy.max', 'np.max', (['pic'], {}), '(pic)\n', (9221, 9226), True, 'import numpy as np\n'), ((9229, 9240), 'numpy.min', 'np.min', (['pic'], {}), '(pic)\n', (9235, 9240), True, 'import numpy as np\n'), ((9891, 10007), 
'matplotlib.pyplot.savefig', 'plt.savefig', (["(folder_name_render + '/frame_00%i.png' % step)"], {'bbox_inches': '"""tight"""', 'transparent': '(True)', 'pad_inches': '(0)'}), "(folder_name_render + '/frame_00%i.png' % step, bbox_inches=\n 'tight', transparent=True, pad_inches=0)\n", (9902, 10007), True, 'import matplotlib.pyplot as plt\n'), ((10959, 11019), 'os.path.join', 'os.path.join', (["(folder_name + '/ground_truth_movie')", 'images[0]'], {}), "(folder_name + '/ground_truth_movie', images[0])\n", (10971, 11019), False, 'import os\n'), ((11049, 11097), 'os.path.join', 'os.path.join', (["(folder_name + '/render')", 'images[0]'], {}), "(folder_name + '/render', images[0])\n", (11061, 11097), False, 'import os\n'), ((12544, 12572), 'matplotlib.pyplot.plot', 'plt.plot', (['yy', 'xx', '"""."""'], {'c': 'col'}), "(yy, xx, '.', c=col)\n", (12552, 12572), True, 'import matplotlib.pyplot as plt\n'), ((12664, 12772), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(folder_name_GT + '/frame_00%i.png' % t)"], {'bbox_inches': '"""tight"""', 'transparent': '(True)', 'pad_inches': '(0)'}), "(folder_name_GT + '/frame_00%i.png' % t, bbox_inches='tight',\n transparent=True, pad_inches=0)\n", (12675, 12772), True, 'import matplotlib.pyplot as plt\n'), ((1440, 1477), 'numpy.random.normal', 'np.random.normal', (['(0)', 'parameter_radius'], {}), '(0, parameter_radius)\n', (1456, 1477), True, 'import numpy as np\n'), ((1607, 1644), 'numpy.random.normal', 'np.random.normal', (['(0)', 'parameter_height'], {}), '(0, parameter_height)\n', (1623, 1644), True, 'import numpy as np\n'), ((3758, 3783), 'numpy.cross', 'np.cross', (['(q - p1)', '(p2 - p1)'], {}), '(q - p1, p2 - p1)\n', (3766, 3783), True, 'import numpy as np\n'), ((6314, 6337), 'numpy.linalg.norm', 'np.linalg.norm', (['(s2 - s1)'], {}), '(s2 - s1)\n', (6328, 6337), True, 'import numpy as np\n'), ((10026, 10141), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(folder_name_render + '/frame_0%i.png' % step)"], {'bbox_inches': 
'"""tight"""', 'transparent': '(True)', 'pad_inches': '(0)'}), "(folder_name_render + '/frame_0%i.png' % step, bbox_inches=\n 'tight', transparent=True, pad_inches=0)\n", (10037, 10141), True, 'import matplotlib.pyplot as plt\n'), ((10149, 10263), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(folder_name_render + '/frame_%i.png' % step)"], {'bbox_inches': '"""tight"""', 'transparent': '(True)', 'pad_inches': '(0)'}), "(folder_name_render + '/frame_%i.png' % step, bbox_inches=\n 'tight', transparent=True, pad_inches=0)\n", (10160, 10263), True, 'import matplotlib.pyplot as plt\n'), ((12789, 12896), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(folder_name_GT + '/frame_0%i.png' % t)"], {'bbox_inches': '"""tight"""', 'transparent': '(True)', 'pad_inches': '(0)'}), "(folder_name_GT + '/frame_0%i.png' % t, bbox_inches='tight',\n transparent=True, pad_inches=0)\n", (12800, 12896), True, 'import matplotlib.pyplot as plt\n'), ((12905, 13011), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(folder_name_GT + '/frame_%i.png' % t)"], {'bbox_inches': '"""tight"""', 'transparent': '(True)', 'pad_inches': '(0)'}), "(folder_name_GT + '/frame_%i.png' % t, bbox_inches='tight',\n transparent=True, pad_inches=0)\n", (12916, 13011), True, 'import matplotlib.pyplot as plt\n'), ((11269, 11325), 'os.path.join', 'os.path.join', (["(folder_name + '/ground_truth_movie')", 'image'], {}), "(folder_name + '/ground_truth_movie', image)\n", (11281, 11325), False, 'import os\n'), ((11362, 11406), 'os.path.join', 'os.path.join', (["(folder_name + '/render')", 'image'], {}), "(folder_name + '/render', image)\n", (11374, 11406), False, 'import os\n'), ((1508, 1527), 'numpy.random.random', 'np.random.random', (['(1)'], {}), '(1)\n', (1524, 1527), True, 'import numpy as np\n'), ((1675, 1694), 'numpy.random.random', 'np.random.random', (['(1)'], {}), '(1)\n', (1691, 1694), True, 'import numpy as np\n')]
|
import random
a1 = input('Primeiro aluno: ')
a2 = input('Segundo aluno: ')
a3 = input('Terceiro aluno: ')
a4 = input('Quarto aluno: ')
sorteio = [a1,a2,a3,a4]
random.shuffle(sorteio)
#Shuffle (embaralha lista)
#Choice (Escolhe 1 da lista)
print(f'Ordem de apresentação: {sorteio}')
|
[
"random.shuffle"
] |
[((165, 188), 'random.shuffle', 'random.shuffle', (['sorteio'], {}), '(sorteio)\n', (179, 188), False, 'import random\n')]
|
"""This module provides the necessary cryptographic primitives for the system.
It is based on the `cryptography <https://cryptography.io/en/latest/>`_
package."""
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
def encrypt_aes_gcm(key, iv, plaintext, associated_data=b""):
"""
Method for encrypting AES-GCM
:param key: byte
:param plaintext: byte
:param associated_data: byte
:param iv: byte
:return: byte, byte
"""
#: Construct an AES-GCM Cipher object with the given key and a
#: randomly generated IV.
encryptor = Cipher(
algorithms.AES(key),
modes.GCM(iv),
backend=default_backend()
).encryptor()
#: Associated_data will be authenticated but not encrypted,
#: it must also be passed in on decryption.
encryptor.authenticate_additional_data(associated_data)
#: Encrypt the plaintext and get the associated cipher text.
#: GCM does not require padding.
cipher_text = encryptor.update(plaintext) + encryptor.finalize()
return cipher_text, encryptor.tag
def decrypt_aes_gcm(key, iv, auth_tag, cipher_text, associated_data=b""):
"""Method to decrypt AES in GCM mode.
Constructs a :class:`Cipher <cryptography.hazmat.primitives.ciphers.Cipher>`
object from key, iv and authentication tag. The associated data is passed in
during decryption.
Args:
key (bytes): The symmetric key used during decryption.
iv (bytes): The initialisation vector used during decryption.
auth_tag (bytes): The authentication tag used during decryption.
cipher_text (bytes): Cipher text to decrypt.
associated_data (bytes): Additional authentication data that was passed
in during encryption.
Returns:
bytes: The decrypted cipher text as bytes.
Raises:
InvalidTag: The authentication tag in combination with the given
parameters is invalid.
"""
decryptor = Cipher(
algorithms.AES(key),
modes.GCM(iv, auth_tag),
backend=default_backend()
).decryptor()
decryptor.authenticate_additional_data(associated_data)
plaintext = decryptor.update(cipher_text) + decryptor.finalize()
return plaintext
def create_signature(private_key, data):
"""
Create PKCS#1 signature using SHA256.
:param private_key: byte
:param data: byte
:return: byte
"""
private_key = serialization.load_pem_private_key(
private_key,
password=None,
backend=default_backend()
)
signer = private_key.signer(
padding.PKCS1v15(),
hashes.SHA256()
)
signer.update(data)
signed_data = signer.finalize()
return signed_data
def verify_signature(public_key, signature, data):
"""
Verify PKCS#1 signature using SHA256.
Raises an InvalidSignature Exception on failure.
:param public_key: byte
:param signature: byte
:param data: byte
:return:
"""
public_key = serialization.load_pem_public_key(
public_key,
backend=default_backend()
)
verifier = public_key.verifier(
signature,
padding.PKCS1v15(),
hashes.SHA256()
)
verifier.update(data)
verifier.verify()
|
[
"cryptography.hazmat.primitives.hashes.SHA256",
"cryptography.hazmat.primitives.ciphers.algorithms.AES",
"cryptography.hazmat.primitives.ciphers.modes.GCM",
"cryptography.hazmat.backends.default_backend",
"cryptography.hazmat.primitives.asymmetric.padding.PKCS1v15"
] |
[((2796, 2814), 'cryptography.hazmat.primitives.asymmetric.padding.PKCS1v15', 'padding.PKCS1v15', ([], {}), '()\n', (2812, 2814), False, 'from cryptography.hazmat.primitives.asymmetric import padding\n'), ((2824, 2839), 'cryptography.hazmat.primitives.hashes.SHA256', 'hashes.SHA256', ([], {}), '()\n', (2837, 2839), False, 'from cryptography.hazmat.primitives import hashes, serialization\n'), ((3361, 3379), 'cryptography.hazmat.primitives.asymmetric.padding.PKCS1v15', 'padding.PKCS1v15', ([], {}), '()\n', (3377, 3379), False, 'from cryptography.hazmat.primitives.asymmetric import padding\n'), ((3389, 3404), 'cryptography.hazmat.primitives.hashes.SHA256', 'hashes.SHA256', ([], {}), '()\n', (3402, 3404), False, 'from cryptography.hazmat.primitives import hashes, serialization\n'), ((2730, 2747), 'cryptography.hazmat.backends.default_backend', 'default_backend', ([], {}), '()\n', (2745, 2747), False, 'from cryptography.hazmat.backends import default_backend\n'), ((3273, 3290), 'cryptography.hazmat.backends.default_backend', 'default_backend', ([], {}), '()\n', (3288, 3290), False, 'from cryptography.hazmat.backends import default_backend\n'), ((794, 813), 'cryptography.hazmat.primitives.ciphers.algorithms.AES', 'algorithms.AES', (['key'], {}), '(key)\n', (808, 813), False, 'from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\n'), ((823, 836), 'cryptography.hazmat.primitives.ciphers.modes.GCM', 'modes.GCM', (['iv'], {}), '(iv)\n', (832, 836), False, 'from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\n'), ((2188, 2207), 'cryptography.hazmat.primitives.ciphers.algorithms.AES', 'algorithms.AES', (['key'], {}), '(key)\n', (2202, 2207), False, 'from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\n'), ((2217, 2240), 'cryptography.hazmat.primitives.ciphers.modes.GCM', 'modes.GCM', (['iv', 'auth_tag'], {}), '(iv, auth_tag)\n', (2226, 2240), False, 'from cryptography.hazmat.primitives.ciphers import 
Cipher, algorithms, modes\n'), ((854, 871), 'cryptography.hazmat.backends.default_backend', 'default_backend', ([], {}), '()\n', (869, 871), False, 'from cryptography.hazmat.backends import default_backend\n'), ((2258, 2275), 'cryptography.hazmat.backends.default_backend', 'default_backend', ([], {}), '()\n', (2273, 2275), False, 'from cryptography.hazmat.backends import default_backend\n')]
|
#!/usr/bin/env python3
# Test Interactive
import datetime
from core import config,raiDB,SMSInteractive
input = True
doorType = 'glass'
print(datetime.datetime.now())
databaseInteractive.interactive(input,doorType)
#SMSInteractive.interactive(input)
|
[
"datetime.datetime.now"
] |
[((144, 167), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (165, 167), False, 'import datetime\n')]
|
_url = 'https://raw.githubusercontent.com/mikkokotila/version-controlled-data/master/data/polymod_social_contact_data.csv'
class Polymod:
def __init__(self, data=None):
import pandas as _pd
self.data = _pd.read_csv(_url)
def country_data(self, country_code='fi'):
# get the country columns to drop it before return
cols = []
for col in self.data.columns:
if 'country_' in col:
cols.append(col)
return p.data[p.data['country_' + country_code] == 1].drop(cols, 1)
def _build_population(self,
population_size,
age_distribution=[15, 65, 20]):
'''Returns a population expressed as a 1d array where
each record is a member of the population.'''
import numpy as np
self.population = np.random.choice([1, 2, 3], size=population_size, p=np.array(age_distribution) / 100)
def _build_contacts(self, data, probabilities=False):
'''Returns participant level daily contact record
in absolute values or probabilities.'''
temp = data.copy(deep=True)
temp = temp.groupby('participant_id').sum()
cols = ['contact_home',
'contact_work',
'contact_school',
'contact_transport',
'contact_leisure',
'contact_other']
temp = temp[cols]
if probabilities:
temp['contact_total'] = temp.sum(axis=1)
for col in cols:
temp[col] = temp[col] / temp['contact_total']
return temp.dropna()
def _build_age_groups(self, country_code):
country_data = self.country_data(country_code)
country_data['0-14'] = country_data.participant_age.between(0, 14).astype(int)
country_data['15-64'] = country_data.participant_age.between(15, 64).astype(int)
country_data['65-100'] = country_data.participant_age.between(64, 100).astype(int)
self.age_young = country_data[country_data.participant_age.between(0, 14)]
self.age_adult = country_data[country_data.participant_age.between(15, 64)]
self.age_elderly = country_data[country_data.participant_age.between(64, 100)]
def raw_daily_contacts(self, country_code='fi', probabilities=False):
self._build_age_groups(country_code)
if probabilities:
young = self._build_contacts(self.age_young, True).values
adult = self._build_contacts(self.age_adult, True).values
elderly = self._build_contacts(self.age_elderly, True).values
else:
young = self._build_contacts(self.age_young).values
adult = self._build_contacts(self.age_adult).values
elderly = self._build_contacts(self.age_elderly).values
return young, adult, elderly
def total_daily_contacts(self,
population_size=1000,
country_code='fi',
multiplier=1,
age_distribution=[15, 65, 20],
restrictions=[0,0,0,0,0,0]):
import random
import numpy as np
restrictions = np.array(restrictions)
self._build_age_groups(country_code)
self._build_population(population_size=population_size, age_distribution=age_distribution)
out = []
young = (self.population == 1).sum() * multiplier
adult = (self.population == 2).sum() * multiplier
elderly = (self.population == 3).sum() * multiplier
young_picks = self._build_contacts(self.age_young).values * (1 - restrictions)
adult_picks = self._build_contacts(self.age_adult).values * (1 - restrictions)
elderly_picks = self._build_contacts(self.age_elderly).values * (1 - restrictions)
out = random.choices(young_picks.tolist(), k=young)
out += random.choices(adult_picks.tolist(), k=adult)
out += random.choices(elderly_picks.tolist(), k=elderly)
return [int(i) for i in np.array(out).sum(0)]
p = Polymod()
|
[
"pandas.read_csv",
"numpy.array"
] |
[((246, 264), 'pandas.read_csv', '_pd.read_csv', (['_url'], {}), '(_url)\n', (258, 264), True, 'import pandas as _pd\n'), ((3351, 3373), 'numpy.array', 'np.array', (['restrictions'], {}), '(restrictions)\n', (3359, 3373), True, 'import numpy as np\n'), ((953, 979), 'numpy.array', 'np.array', (['age_distribution'], {}), '(age_distribution)\n', (961, 979), True, 'import numpy as np\n'), ((4212, 4225), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (4220, 4225), True, 'import numpy as np\n')]
|
"""
Copyright: Wenyi Tang 2017-2019
Author: <NAME>
Email: <EMAIL>
Created Date: Jan 7th, 2019
Misc utility tools
- make TFRecords files
"""
# Copyright (c): <NAME> 2017-2019.
# Author: <NAME>
# Email: <EMAIL>
# Update Date: 2019/4/3 下午5:03
import tensorflow as tf
def make_tensor_label_records(tensors, labels, writer):
assert isinstance(tensors, (list, tuple))
assert isinstance(labels, (list, tuple))
assert len(tensors) == len(labels)
example = tf.train.Example(features=tf.train.Features())
for _t, _l in zip(tensors, labels):
assert isinstance(_t, bytes)
assert isinstance(_l, str)
bl = tf.train.BytesList(value=[_t])
ff = example.features.feature.get_or_create(_l)
ff.MergeFrom(tf.train.Feature(bytes_list=bl))
writer.write(example.SerializeToString())
|
[
"tensorflow.train.BytesList",
"tensorflow.train.Features",
"tensorflow.train.Feature"
] |
[((625, 655), 'tensorflow.train.BytesList', 'tf.train.BytesList', ([], {'value': '[_t]'}), '(value=[_t])\n', (643, 655), True, 'import tensorflow as tf\n'), ((492, 511), 'tensorflow.train.Features', 'tf.train.Features', ([], {}), '()\n', (509, 511), True, 'import tensorflow as tf\n'), ((725, 756), 'tensorflow.train.Feature', 'tf.train.Feature', ([], {'bytes_list': 'bl'}), '(bytes_list=bl)\n', (741, 756), True, 'import tensorflow as tf\n')]
|
import pytest
from sitri.contrib.system import SystemConfigProvider, SystemCredentialProvider
@pytest.fixture(scope="module")
def system_config() -> SystemConfigProvider:
return SystemConfigProvider(prefix="test")
@pytest.fixture(scope="module")
def system_credential() -> SystemCredentialProvider:
return SystemCredentialProvider(prefix="test")
|
[
"sitri.contrib.system.SystemCredentialProvider",
"pytest.fixture",
"sitri.contrib.system.SystemConfigProvider"
] |
[((98, 128), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (112, 128), False, 'import pytest\n'), ((224, 254), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (238, 254), False, 'import pytest\n'), ((185, 220), 'sitri.contrib.system.SystemConfigProvider', 'SystemConfigProvider', ([], {'prefix': '"""test"""'}), "(prefix='test')\n", (205, 220), False, 'from sitri.contrib.system import SystemConfigProvider, SystemCredentialProvider\n'), ((319, 358), 'sitri.contrib.system.SystemCredentialProvider', 'SystemCredentialProvider', ([], {'prefix': '"""test"""'}), "(prefix='test')\n", (343, 358), False, 'from sitri.contrib.system import SystemConfigProvider, SystemCredentialProvider\n')]
|
"""Main module."""
import whisk
from whisk.cli.log_tree import PARENT_TREE_NODE_PREFIX, CHILD_TREE_NODE_PREFIX
from cookiecutter.main import cookiecutter
from os.path import realpath
# https://docs.python.org/3/library/pathlib.html
# Object-oriented filesystem paths
from pathlib import Path
import logging
logger = logging.getLogger(__name__)
def root_module_dir():
"""
Returns a Path object with the root whisk module directory.
"""
filepath = realpath(__file__)
return Path(filepath).parents[0]
def cookiecutter_template_dir():
return str(root_module_dir() / 'template/')
def to_slug(str):
"""
Converts a string to a slug:
* Makes all letters lowercase
* Replaces spaces with underscores
"""
return str.lower().replace(' ', '_')
def create(dir, force=False,
module_name=None,
dependency=f"whisk=={whisk.__version__}",
install_requires=f"whisk=={whisk.__version__}"):
"""
Creates a whisk project.
Parameters
----------
dir : str
Path of the directory to create the project. The directory name is
converted to a slug via :func:`project_name_to_slug`.
module_name : str, optional
Name of the module used when importing the project. This is converted to a
slug via :func:`project_name_to_slug`. Default is the ``project_name``.
force : bool, optional
Recreates the project directory if it exists. Default is `False`.
dependency : str, optional
The whisk dependency entry in the project's requirements.txt file.
Default locks to the current version. The version lock is restrictive
as earlier and later versions of whisk could expect a different
template structure and break functionality.
install_requires : str, optional
The whisk ``install_requires`` entry in the project's ``setup.py``
file. Default locks to the current version. The version lock is
restrictive as earlier and later versions of whisk could expect a
different template structure and break functionality.
"""
path = Path(dir).absolute()
logger.debug(f"Creating project in {path}.")
project_name = path.stem
output_dir = path.parent
project_name_slug = to_slug(project_name)
if module_name:
module_name_slug = to_slug(module_name)
else:
module_name_slug = project_name_slug
# `whisk_dependency` is more flexible (for example, specifying a local
# install) than `whisk_install_requires` and is used in testing to require
# the local version of whisk.
extra_content = {
"repo_name": project_name_slug,
"project_name": module_name_slug,
"whisk_dependency": dependency,
"whisk_install_requires": install_requires
}
logger.debug(f"Creating whisk project with extra_content={extra_content}")
logger.info(PARENT_TREE_NODE_PREFIX +
"Creating project directory structure...")
res = cookiecutter(cookiecutter_template_dir(),
no_input=True,
overwrite_if_exists=force,
output_dir=output_dir,
extra_context=extra_content)
logger.info(CHILD_TREE_NODE_PREFIX+"Project created in %s", res)
logger.info(CHILD_TREE_NODE_PREFIX+"DONE.")
return res
|
[
"os.path.realpath",
"logging.getLogger",
"pathlib.Path"
] |
[((317, 344), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (334, 344), False, 'import logging\n'), ((465, 483), 'os.path.realpath', 'realpath', (['__file__'], {}), '(__file__)\n', (473, 483), False, 'from os.path import realpath\n'), ((495, 509), 'pathlib.Path', 'Path', (['filepath'], {}), '(filepath)\n', (499, 509), False, 'from pathlib import Path\n'), ((2128, 2137), 'pathlib.Path', 'Path', (['dir'], {}), '(dir)\n', (2132, 2137), False, 'from pathlib import Path\n')]
|
# -*- coding: utf-8 -*-
# @Time : 2019/1/8 下午8:20
# @Author : yidxue
from __future__ import division
import numpy
import datetime
import pandas as pd
def get_all_file_path(path):
"""
循环遍历,得到一个文件夹第一层下的文件路径
"""
import os
file_name_list = os.listdir(path)
return [path + os.sep + file_name for file_name in file_name_list]
def read_file(path_ls):
"""
读数据
"""
map = {}
for file_path in path_ls:
with open(file_path, mode="r") as in_file:
for i, line in enumerate(in_file):
if not (line.strip == "" or line.startswith('clusterid')):
data = line.strip().split(",")
cluster = data[0]
timestamp = data[1]
rtts = float(data[7])
if cluster in map.keys():
map[cluster][timestamp] = rtts
else:
cluster_map = {timestamp: rtts}
map[cluster] = cluster_map
return map
def write_file(file_path, context_ls, method='a'):
"""
写数据到一个文件
:param file_path:
:param method: 'a'表示默认为追加方式, 'wb'表示覆盖或者创建文件写入
:param context:
"""
with open(file_path, method) as fo:
for text in context_ls:
fo.write(text + "\n")
# 关闭打开的文件
fo.close()
def calculate_std(dps, moving_average):
variance = 0
flag_list = moving_average.isnull()
count = 0
for index in range(len(dps)):
if flag_list[index]:
count += 1
continue
variance += (dps[index] - moving_average[index]) ** 2
variance /= (len(dps) - count)
return numpy.sqrt(variance)
day = '2018-12-24'
# 1. 读数据
path = '/Users/cisco/Downloads/abnormal_value_2018lastweek/abnormal_value_{day}.csv'
path_ls = get_all_file_path(path.format(day=day))
# 2. 读数据
data_dict = read_file(path_ls)
# 3. 每个cluster时间戳进行排序
DESC = False
# 列表推导生成字典,这个字典的value的是排序后的另一个字典
data_sort = {
cluster: sorted(data_dict[cluster].items(), key=lambda d: datetime.datetime.strptime(d[0], '%Y-%m-%d %H:%M:%S'),
reverse=DESC)
for cluster in data_dict.keys()}
cluster = {}
for key in data_sort.keys():
cluster[key] = pd.Series({item[0]: item[1] for item in data_sort[key]})
# 4. 异常检测
for key in cluster.keys():
dps = pd.Series(cluster[key])
ewma_line = dps.ewm(span=4).mean()
ewma_std = calculate_std(dps, ewma_line)
result = []
for index in ewma_line.index:
if not (ewma_line[index] - ewma_std <= dps[index] <= ewma_line[index] + ewma_std):
result.append(key + "," + index + "," + str(dps[index]) + ",1")
else:
result.append(key + "," + index + "," + str(dps[index]) + ",0")
# 存数据
write_file('/Users/cisco/Desktop/{day}.csv'.format(day=day), result)
|
[
"pandas.Series",
"datetime.datetime.strptime",
"os.listdir",
"numpy.sqrt"
] |
[((262, 278), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (272, 278), False, 'import os\n'), ((1673, 1693), 'numpy.sqrt', 'numpy.sqrt', (['variance'], {}), '(variance)\n', (1683, 1693), False, 'import numpy\n'), ((2234, 2290), 'pandas.Series', 'pd.Series', (['{item[0]: item[1] for item in data_sort[key]}'], {}), '({item[0]: item[1] for item in data_sort[key]})\n', (2243, 2290), True, 'import pandas as pd\n'), ((2339, 2362), 'pandas.Series', 'pd.Series', (['cluster[key]'], {}), '(cluster[key])\n', (2348, 2362), True, 'import pandas as pd\n'), ((2046, 2099), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['d[0]', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(d[0], '%Y-%m-%d %H:%M:%S')\n", (2072, 2099), False, 'import datetime\n')]
|
# -*- coding: utf-8 -*-
u"""Implements a state machine for the parsing process.
"""
# TODO: translation API
from __future__ import absolute_import
from httoop.exceptions import Invalid, InvalidBody, InvalidHeader, InvalidLine, InvalidURI
from httoop.header import Headers
from httoop.messages import Message
from httoop.status import BAD_REQUEST, NOT_IMPLEMENTED
from httoop.util import Unicode, _, integer
# Line terminators: HTTP lines end with CRLF, but a bare LF is tolerated.
CR = b'\r'
LF = b'\n'
CRLF = CR + LF
# Sentinel returned by the parse_* methods while a message part is incomplete.
NOT_RECEIVED_YET = True
class StateMachine(object):
	u"""A protocol state machine which supports pipelining and
	parses HTTP messages by turning them into appropriate objects."""
	# Subclasses set this to the concrete message type they parse.
	Message = Message  # subclass provides the type
	def __init__(self):
		self.buffer = bytearray()  # raw bytes received but not yet consumed
		self.message = None  # message currently being parsed (None between messages)
	def _reset_state(self):
		# Prepare a fresh message and reset all per-message parsing state.
		self.message = self.Message()
		self.trailers = None
		self.line_end = CRLF  # may be relaxed to a bare LF by parse_startline()
		self.message_length = None  # remaining body bytes (Content-Length framing)
		self.chunked = False  # True when Transfer-Encoding: chunked framing is used
		self.state = dict(
			startline=False,
			protocol=False,
			headers=False,
			body=False,
			trailer=False,
		)
	def on_message_started(self):
		self._reset_state()
	def on_startline_complete(self):
		self.state['protocol'] = True
		self.on_protocol_complete()
	def on_method_complete(self):
		pass  # hook for subclasses
	def on_uri_complete(self):
		pass  # hook for subclasses
	def on_protocol_complete(self):
		pass  # hook for subclasses
	def on_headers_complete(self):
		# Configure body decoding from the now complete header section.
		self.set_body_content_encoding()
		self.set_body_content_type()
	def on_body_complete(self):
		# Rewind, decompress and rewind the body, then fix up Content-Length.
		self.message.body.seek(0)
		self.message.body.decompress()
		self.message.body.seek(0)
		self.set_content_length()
	def on_message_complete(self):
		# Detach the finished message so the next pipelined one can be parsed.
		message = self.message
		self.message = None
		return message
	def parse(self, data):
		u"""Appends the given data to the internal buffer
			and parses it as HTTP Request-Messages.

			:param data:
				data to parse
			:type data: bytes

			:returns: the messages which were parsed completely
			:rtype: tuple
		"""
		self.buffer.extend(data)
		try:
			return tuple(x for x in self._parse() if x is not None)
		except (InvalidHeader, InvalidLine, InvalidURI, InvalidBody) as exc:
			# any parse error is surfaced to the peer as 400 Bad Request
			raise BAD_REQUEST(Unicode(exc))
	def _parse(self):
		# Generator driving the state machine: yields every completely parsed
		# message and returns as soon as more data has to be received.
		while self.buffer:
			if self.message is None:
				yield self.on_message_started()
			state = self.state
			if not state['startline']:
				if self.parse_startline():
					return  # start line incomplete, wait for more data
				state['startline'] = True
				yield self.on_startline_complete()
			if not state['headers']:
				if self.parse_headers():
					return  # header section incomplete, wait for more data
				state['headers'] = True
				yield self.on_headers_complete()
			if not state['body']:
				if self.parse_body():
					return  # body incomplete, wait for more data
				state['body'] = True
				yield self.on_body_complete()
			yield self.on_message_complete()
	def parse_startline(self):
		# Wait until a full line is available; be liberal and accept a bare
		# LF as line terminator if no CRLF is present.
		if CRLF not in self.buffer:
			if LF not in self.buffer:
				return NOT_RECEIVED_YET
			self.line_end = LF
		requestline, self.buffer = self.buffer.split(self.line_end, 1)
		# parse request line
		try:
			self.message.parse(bytes(requestline))
		except (InvalidLine, InvalidURI) as exc:
			raise BAD_REQUEST(Unicode(exc))
	def parse_headers(self):
		# empty headers?
		if self.buffer.startswith(self.line_end):
			self.buffer = self.buffer[len(self.line_end):]
			return False
		header_end = self.line_end + self.line_end
		if header_end not in self.buffer:
			# eagerly parse the complete header lines received so far
			self._parse_single_headers()
			# headers incomplete
			return NOT_RECEIVED_YET
		headers, self.buffer = self.buffer.split(header_end, 1)
		self._parse_header(headers)
	def _parse_single_headers(self):
		# Parse all complete header lines while the header section as a whole
		# is still incomplete; keep the (possibly folded) last line buffered.
		if self.buffer.endswith(self.line_end):
			headers, _, rest = self.buffer[:-len(self.line_end)].rpartition(self.line_end)
			rest += self.buffer[-len(self.line_end):]
		else:
			headers, _, rest = self.buffer.rpartition(self.line_end)
		# NOTE(review): "_" here holds rpartition()'s separator and shadows the
		# translation helper imported at module level within this method.
		if headers and _ and rest[:1] not in (b'', b'\t', b' '):
			self.buffer = rest
			self._parse_header(headers)
	def _parse_header(self, headers):
		# parse headers
		if headers:
			try:
				self.message.headers.parse(bytes(headers))
			except InvalidHeader as exc:
				raise BAD_REQUEST(Unicode(exc))
	def parse_body(self):
		if self.message_length is None and not self.chunked:
			self.determine_message_length()
		if self.chunked:
			return self.parse_chunked_body()
		elif self.message_length:
			return self.parse_body_with_message_length()
		else:
			return False  # no message body
	def determine_message_length(self):
		# RFC 2616 Section 4.4
		# get message length
		# TODO: check if both is set
		message = self.message
		if 'Transfer-Encoding' in message.headers and message.protocol >= (1, 1):
			# chunked transfer in HTTP/1.1
			te = message.headers['Transfer-Encoding'].lower()
			self.chunked = 'chunked' == te
			if not self.chunked:
				raise NOT_IMPLEMENTED(u'Unknown HTTP/1.1 Transfer-Encoding: %r' % te)
		else:
			# Content-Length header defines the length of the message body
			try:
				self.message_length = integer(message.headers.get("Content-Length", "0"))
				if self.message_length < 0:
					self.message_length = None
					raise ValueError()
			except ValueError:
				raise BAD_REQUEST(_(u'Invalid Content-Length header.'))
	def parse_body_with_message_length(self):
		# Consume up to message_length bytes from the buffer into the body.
		body, self.buffer = self.buffer[:self.message_length], self.buffer[self.message_length:]
		self.message.body.parse(bytes(body))
		blen = len(body)
		unfinished = blen < self.message_length
		self.message_length -= blen  # bytes still missing from the body
		if unfinished:
			# the body is not yet received completely
			return NOT_RECEIVED_YET
	def parse_chunked_body(self):
		if self.state['trailer']:
			return self.parse_trailers()
		if self.line_end not in self.buffer:
			# chunk size info not received yet
			return NOT_RECEIVED_YET
		chunk_size, rest_chunk = self.__parse_chunk_size()
		if len(rest_chunk) < (len(self.line_end) + chunk_size):
			# chunk not received completely
			return NOT_RECEIVED_YET
		body_part, rest_chunk = rest_chunk[:chunk_size], rest_chunk[chunk_size:]
		self.message.body.parse(bytes(body_part))
		self.buffer = rest_chunk
		if chunk_size == 0:
			# a zero-sized chunk terminates the body; trailers may follow
			self.state['trailer'] = True
			return self.parse_trailers()
		if not rest_chunk.startswith(self.line_end):
			raise InvalidBody(_(u'Invalid chunk terminator: %r'), rest_chunk[:2].decode('ISO8859-1'))
		self.buffer = self.buffer[len(self.line_end):]
		# next chunk
		return self.parse_chunked_body()
	def __parse_chunk_size(self):
		# Split off the chunk-size line; chunk extensions (after ';') are ignored.
		line, rest_chunk = self.buffer.split(self.line_end, 1)
		_chunk_size = line.split(b";", 1)[0].strip()
		try:
			chunk_size = integer(bytes(_chunk_size), 16)  # chunk size is hexadecimal
			if chunk_size < 0:
				raise ValueError()
		except (ValueError, OverflowError):
			exc = InvalidHeader(_(u'Invalid chunk size: %r'), _chunk_size.decode('ISO8859-1'))
			raise BAD_REQUEST(Unicode(exc))
		else:
			return chunk_size, rest_chunk
	def parse_trailers(self):
		# TODO: the code is exactly the same as parse_headers but
		# we have to make sure no invalid header fields are send (only values told in Trailer header allowed)
		if self.buffer.startswith(self.line_end):
			self.buffer = self.buffer[len(self.line_end):]
			return False  # no trailers
		trailer_end = self.line_end + self.line_end
		if trailer_end not in self.buffer:
			# not received yet
			return NOT_RECEIVED_YET
		trailers, self.buffer = self.buffer.split(trailer_end, 1)
		self.trailers = Headers()
		try:
			self.trailers.parse(bytes(trailers))
		except InvalidHeader as exc:
			exc = InvalidHeader(_(u'Invalid trailers: %r'), Unicode(exc))
			raise BAD_REQUEST(Unicode(exc))
		self.merge_trailer_into_header()
		return False
	def merge_trailer_into_header(self):
		# Move every trailer announced in the Trailer header into the message
		# headers; any trailer that was not announced is rejected.
		message = self.message
		for name in message.headers.values('Trailer'):
			value = self.trailers.pop(name, None)
			if value is not None:
				message.headers.append(name, value)
		if self.trailers:
			msg_trailers = u'" ,"'.join(self.trailers.keys())
			raise BAD_REQUEST(u'untold trailers: "%s"' % msg_trailers)
		del self.trailers
	def set_body_content_encoding(self):
		if 'Content-Encoding' in self.message.headers:
			try:
				self.message.body.content_encoding = self.message.headers.element('Content-Encoding')
				self.message.body.content_encoding.codec  # pylint: disable=W0104
			except Invalid as exc:
				raise NOT_IMPLEMENTED(Unicode(exc))
	def set_body_content_type(self):
		if 'Content-Type' in self.message.headers:
			self.message.body.mimetype = self.message.headers.element('Content-Type')
	def set_content_length(self):
		# Make Content-Length reflect the actual (decoded) body length.
		if 'Content-Length' not in self.message.headers:
			self.message.headers['Content-Length'] = str(len(self.message.body)).encode('ASCII')
		if self.chunked:
			self.message.headers.pop('Transfer-Encoding')  # FIXME: there could be other transfer codings as well, only pop out chunked!
|
[
"httoop.header.Headers",
"httoop.status.BAD_REQUEST",
"httoop.status.NOT_IMPLEMENTED",
"httoop.util._",
"httoop.util.Unicode"
] |
[((7145, 7154), 'httoop.header.Headers', 'Headers', ([], {}), '()\n', (7152, 7154), False, 'from httoop.header import Headers\n'), ((7685, 7737), 'httoop.status.BAD_REQUEST', 'BAD_REQUEST', (['(u\'untold trailers: "%s"\' % msg_trailers)'], {}), '(u\'untold trailers: "%s"\' % msg_trailers)\n', (7696, 7737), False, 'from httoop.status import BAD_REQUEST, NOT_IMPLEMENTED\n'), ((4584, 4647), 'httoop.status.NOT_IMPLEMENTED', 'NOT_IMPLEMENTED', (["(u'Unknown HTTP/1.1 Transfer-Encoding: %r' % te)"], {}), "(u'Unknown HTTP/1.1 Transfer-Encoding: %r' % te)\n", (4599, 4647), False, 'from httoop.status import BAD_REQUEST, NOT_IMPLEMENTED\n'), ((6008, 6042), 'httoop.util._', '_', (['u"""Invalid chunk terminator: %r"""'], {}), "(u'Invalid chunk terminator: %r')\n", (6009, 6042), False, 'from httoop.util import Unicode, _, integer\n'), ((2002, 2014), 'httoop.util.Unicode', 'Unicode', (['exc'], {}), '(exc)\n', (2009, 2014), False, 'from httoop.util import Unicode, _, integer\n'), ((2921, 2933), 'httoop.util.Unicode', 'Unicode', (['exc'], {}), '(exc)\n', (2928, 2933), False, 'from httoop.util import Unicode, _, integer\n'), ((6477, 6505), 'httoop.util._', '_', (['u"""Invalid chunk size: %r"""'], {}), "(u'Invalid chunk size: %r')\n", (6478, 6505), False, 'from httoop.util import Unicode, _, integer\n'), ((6561, 6573), 'httoop.util.Unicode', 'Unicode', (['exc'], {}), '(exc)\n', (6568, 6573), False, 'from httoop.util import Unicode, _, integer\n'), ((7256, 7282), 'httoop.util._', '_', (['u"""Invalid trailers: %r"""'], {}), "(u'Invalid trailers: %r')\n", (7257, 7282), False, 'from httoop.util import Unicode, _, integer\n'), ((7284, 7296), 'httoop.util.Unicode', 'Unicode', (['exc'], {}), '(exc)\n', (7291, 7296), False, 'from httoop.util import Unicode, _, integer\n'), ((7319, 7331), 'httoop.util.Unicode', 'Unicode', (['exc'], {}), '(exc)\n', (7326, 7331), False, 'from httoop.util import Unicode, _, integer\n'), ((3907, 3919), 'httoop.util.Unicode', 'Unicode', (['exc'], {}), '(exc)\n', 
(3914, 3919), False, 'from httoop.util import Unicode, _, integer\n'), ((4940, 4976), 'httoop.util._', '_', (['u"""Invalid Content-Length header."""'], {}), "(u'Invalid Content-Length header.')\n", (4941, 4976), False, 'from httoop.util import Unicode, _, integer\n'), ((8066, 8078), 'httoop.util.Unicode', 'Unicode', (['exc'], {}), '(exc)\n', (8073, 8078), False, 'from httoop.util import Unicode, _, integer\n')]
|
"""
Tests for preferences route
"""
import json
from fastapi.testclient import TestClient
from config.settings import settings
# JWT used to authenticate every request in these tests.
jwt_test = settings.test_jwt
header = {"Authorization": f"Bearer {jwt_test}"}
ROUTE = "/preferences/"
# Request payload shared by the submission tests below.
body = json.dumps(
    {
        "email": "string",
        "preferences": ["EVENTS", "AMBIENCE", "MARKETING"],
    }
)
def post_preferences_first_fill(client: TestClient):
    """Submit preferences for the first time and expect success."""
    response = client.post(url=ROUTE, data=body, headers=header)
    assert response.status_code == 200
    assert response.json() == {"status": True}
def post_preferences_another_fill(client: TestClient):
    """Submit preferences a second time and expect a 403 rejection."""
    response = client.post(url=ROUTE, data=body, headers=header)
    assert response.status_code == 403
    assert response.json() == {"detail": "Preferences Already Filled"}
def check_preferences_already_filled(client: TestClient):
    """Fetch the preferences fill status and expect it to be set."""
    response = client.get(url=ROUTE, headers=header)
    assert response.status_code == 200
    assert response.json() == {"status": True}
|
[
"json.dumps"
] |
[((240, 327), 'json.dumps', 'json.dumps', (["{'email': 'string', 'preferences': ['EVENTS', 'AMBIENCE', 'MARKETING']}"], {}), "({'email': 'string', 'preferences': ['EVENTS', 'AMBIENCE',\n 'MARKETING']})\n", (250, 327), False, 'import json\n')]
|
#-*- coding: utf-8 -*-
# vim: set fileencoding=utf-8
"""
settings.py
Configuration for Flask app
Important: Place your keys in the secret_keys.py module,
which should be kept out of version control.
"""
import logging
import os
import urllib
from secret_keys import CSRF_SECRET_KEY, SESSION_KEY
class Config(object):
    """
    Base configuration shared by every environment.
    """
    # Known deployment environments.
    ENV_PRODUCTION = 'PRODUCTION'  # live site
    ENV_STAGING = 'STAGING'        # production-like environment for testing
    ENV_LOCAL = 'LOCAL'            # local development server
    ENVIRONMENT_CHOICES = [ENV_PRODUCTION, ENV_STAGING, ENV_LOCAL]

    # Flag defaults; environment subclasses flip these as needed.
    DEBUG = False
    TESTING = False
    STAGING = False
    PRODUCTION = False
    CSRF_ENABLED = True

    # Secret keys for CSRF protection (kept out of version control).
    SECRET_KEY = CSRF_SECRET_KEY
    CSRF_SESSION_KEY = SESSION_KEY

    OAUTH2_SCOPE = ""
    EMAIL_REGEXP = "^[a-zA-Z0-9'._-]+@[a-zA-Z0-9._-]+.[a-zA-Z]{2,6}$"
class ProductionConfig(Config):
    """
    Configuration for the live production environment.
    """
    PRODUCTION = True
    DEBUG = False
    TESTING = False
    STAGING = False
    CSRF_ENABLED = True
class TestingConfig(Config):
    """
    Configuration used while developing and running tests.
    """
    TESTING = True
    DEBUG = False
    PRODUCTION = False
    CSRF_ENABLED = False
class DevelopmentConfig(TestingConfig):
    """
    Configuration used when running on a local development server.
    """
    DEFAULT_SERVER_NAME = 'localhost:8080'
    CUSTOM_SERVER_NAME = 'localhost:8080'
def get_setting(key):
    """
    Get the value for a setting with the given key, since cache is shared
    between staging and production is necessary to include that in the key too

    :param key: string that represents the setting key
    :return: the value of the setting
    """
    try:
        from main import flask_app
        return flask_app.config[key]
    # Only catch what the lookup can actually raise (the previous bare
    # ``except:`` also swallowed SystemExit/KeyboardInterrupt and real bugs):
    # ImportError when the app module is unavailable, KeyError when the key
    # is not in the app config, RuntimeError outside an app context.
    except (ImportError, KeyError, RuntimeError):
        environment = get_environment()
        # Load settings from the corresponding class.
        if environment == Config.ENV_PRODUCTION:
            obj = ProductionConfig()
        else:
            obj = TestingConfig()
        return getattr(obj, key)
def get_environment():
    """
    Returns the environment based on the OS variable, server name and app id

    :return: one of Config.ENVIRONMENT_CHOICES
    """
    server_software = os.environ.get('SERVER_SOFTWARE', '')
    if server_software.startswith('Dev'):
        # App Engine local development server
        return Config.ENV_LOCAL
    if server_software.startswith('Google App Engine/'):
        # A '-staging' marker in the deployed version id denotes staging.
        version_id = str(os.environ.get('CURRENT_VERSION_ID', ''))
        if '-staging' in version_id:
            return Config.ENV_STAGING
        # Neither local nor staging, so assume production.
        return Config.ENV_PRODUCTION
    return Config.ENV_LOCAL
def get_raw_server_name():
    """
    The raw server name is GAE generated by default, it's meant for a
    specific version of an app. The version ID is taken from OS variable and
    the ID from the identity API

    :return: URL in the form version.appid.appspot.com
    """
    from google.appengine.api import app_identity
    version = os.environ['CURRENT_VERSION_ID'].split('.')[0]
    return '%s.%s.appspot.com' % (version, app_identity.get_application_id())
def get_url():
    """Returns the URL of the page currently being served.

    Returns:
        The full URL of the page currently being served.
    """
    scheme = 'http://' if os.environ['SERVER_PORT'] == '80' else 'https://'
    host = os.environ['SERVER_NAME']
    script_name = urllib.quote(os.environ.get('SCRIPT_NAME', ''))
    path_info = urllib.quote(os.environ.get('PATH_INFO', ''))
    query = os.environ.get('QUERY_STRING', '')
    if query:
        query = '?' + query
    return scheme + host + script_name + path_info + query
|
[
"os.environ.get",
"google.appengine.api.app_identity.get_application_id"
] |
[((4038, 4072), 'os.environ.get', 'os.environ.get', (['"""QUERY_STRING"""', '""""""'], {}), "('QUERY_STRING', '')\n", (4052, 4072), False, 'import os\n'), ((3932, 3965), 'os.environ.get', 'os.environ.get', (['"""SCRIPT_NAME"""', '""""""'], {}), "('SCRIPT_NAME', '')\n", (3946, 3965), False, 'import os\n'), ((3996, 4027), 'os.environ.get', 'os.environ.get', (['"""PATH_INFO"""', '""""""'], {}), "('PATH_INFO', '')\n", (4010, 4027), False, 'import os\n'), ((3573, 3606), 'google.appengine.api.app_identity.get_application_id', 'app_identity.get_application_id', ([], {}), '()\n', (3604, 3606), False, 'from google.appengine.api import app_identity\n')]
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import datetime
import unittest
import azure.mgmt.consumption
import azure.mgmt.consumption.models
from devtools_testutils import AzureMgmtTestCase, ResourceGroupPreparer
class MgmtConsumptionTest(AzureMgmtTestCase):
    """Live/recorded test exercising the Consumption budgets CRUD API."""
    def setUp(self):
        super(MgmtConsumptionTest, self).setUp()
        # Management client under test, created with recorded credentials.
        self.consumption_client = self.create_mgmt_client(
            azure.mgmt.consumption.ConsumptionManagementClient
        )
    @ResourceGroupPreparer()
    def test_budgets(self, resource_group):
        # Fall back to a dummy subscription id when replaying recordings.
        SUBSCRIPTION_ID = getattr(self.settings, 'SUBSCRIPTION_ID', "123")
        SCOPE = '/subscriptions/{}/resourceGroups/{}'.format(SUBSCRIPTION_ID, resource_group.name)
        BUDGET_NAME = self.get_resource_name('budget')
        # create
        BODY = {
            "category": "Cost",
            "amount": '100',
            "timeGrain": "Monthly",
            "timePeriod": {
                "startDate": "2020-10-01T00:00:00Z",
                "endDate": "2021-10-31T00:00:00Z"
            }
        }
        self.consumption_client.budgets.create_or_update(SCOPE, BUDGET_NAME, BODY)
        # get
        self.consumption_client.budgets.get(SCOPE, BUDGET_NAME)
        # delete
        self.consumption_client.budgets.delete(SCOPE, BUDGET_NAME)
# ------------------------------------------------------------------------------
if __name__ == '__main__':
    unittest.main()
|
[
"unittest.main",
"devtools_testutils.ResourceGroupPreparer"
] |
[((756, 779), 'devtools_testutils.ResourceGroupPreparer', 'ResourceGroupPreparer', ([], {}), '()\n', (777, 779), False, 'from devtools_testutils import AzureMgmtTestCase, ResourceGroupPreparer\n'), ((1700, 1715), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1713, 1715), False, 'import unittest\n')]
|
import argparse
from chess_player.ChessGame import ChessGame
from chess_player.Player import SearchPlayer, RandomPlayer
from chess_player.Scorer import SimplifiedEvaluationFunction
from random import randint
from time import perf_counter
class Program:
    """Command line driver that plays chess games between a search player
    and a random player and appends one CSV line of results per game."""

    @staticmethod
    def main():
        """Entry point: build a Program and run it."""
        program = Program()
        program.run()

    def __init__(self):
        self._args = None
        self._parser = argparse.ArgumentParser(description="Play some chess")
        self._scorer = SimplifiedEvaluationFunction()
        # Records which colour received the stronger (search) player.
        self._stronger = None

    def run(self):
        """Parse the CLI arguments and play the configured number of games,
        timing each one and appending its result to the output CSV."""
        self.add_arguments_and_parse()
        print(f"Playing chess with a search depth of {self._args.search_depth} and {self._args.max_children} "
              f"children for {self._args.max_turns} turns.")
        with open(self._args.outfile, 'a') as outfile:
            for i in range(self._args.iterations):
                print(f"Playing game iteration {i}...")
                start_time = perf_counter()
                termination, winner_won, played_turns = self.play_chess()
                run_time = perf_counter() - start_time
                print(f"This game took: {run_time:0.2f} seconds...\n\n")
                outfile.write(f"{self._args.search_depth},{self._args.max_children},{self._args.max_turns},"
                              f"{run_time:0.2f},{termination},{self._stronger},{winner_won},{played_turns}\n")

    # NOTE: the original annotation ``-> (str, str, int)`` built a plain tuple
    # of classes and was not a valid type hint; use a proper string annotation.
    def play_chess(self) -> 'tuple[str, str, int]':
        """Play a single game and return (termination, winner_won, played_turns)."""
        search_depth = self._args.search_depth
        max_children = self._args.max_children
        # flip a coin to see who gets the better player (search vs. random)
        if randint(0, 1) == 0:
            print("The white player should be stronger.")
            white_player = SearchPlayer(
                search_depth=search_depth, max_children=max_children, scorer=self._scorer)
            black_player = RandomPlayer()
            self._stronger = 'White stronger'
        else:
            print("The black player should be stronger.")
            white_player = RandomPlayer()
            black_player = SearchPlayer(
                search_depth=search_depth, max_children=max_children, scorer=self._scorer)
            self._stronger = 'Black stronger'
        game = ChessGame(white_player=white_player, black_player=black_player)
        game.play_until(self._args.max_turns)
        game.print_game_stats()
        termination, winner_won, played_turns = game.get_results()
        game.reset()
        return termination, winner_won, played_turns

    def add_arguments_and_parse(self):
        """Define all command line options and parse them into self._args."""
        self._parser.add_argument("--search-depth", dest="search_depth", required=False, default=3, type=int,
                                  help="How many levels deep to search for a good move. Default: 3")
        self._parser.add_argument("--max-children", dest="max_children", required=False, default=5, type=int,
                                  help="How many random legal moves will be evaluated. Default: 5")
        self._parser.add_argument("--max-turns", dest="max_turns", required=False, default=150, type=int,
                                  help="Play until this many turns have been played. Default: 150")
        self._parser.add_argument("--iterations", dest="iterations", required=False, default=10, type=int,
                                  help="How many iterations to run with this configuration. Default: 10")
        self._parser.add_argument("--outfile", dest="outfile", required=False, default='chess-results.csv', type=str,
                                  help="Where to write the results. Default: chess-results.csv")
        self._args = self._parser.parse_args()


if __name__ == "__main__":
    Program.main()
|
[
"random.randint",
"argparse.ArgumentParser",
"chess_player.Player.RandomPlayer",
"chess_player.Scorer.SimplifiedEvaluationFunction",
"time.perf_counter",
"chess_player.Player.SearchPlayer",
"chess_player.ChessGame.ChessGame"
] |
[((413, 467), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Play some chess"""'}), "(description='Play some chess')\n", (436, 467), False, 'import argparse\n'), ((491, 521), 'chess_player.Scorer.SimplifiedEvaluationFunction', 'SimplifiedEvaluationFunction', ([], {}), '()\n', (519, 521), False, 'from chess_player.Scorer import SimplifiedEvaluationFunction\n'), ((2247, 2310), 'chess_player.ChessGame.ChessGame', 'ChessGame', ([], {'white_player': 'white_player', 'black_player': 'black_player'}), '(white_player=white_player, black_player=black_player)\n', (2256, 2310), False, 'from chess_player.ChessGame import ChessGame\n'), ((1639, 1652), 'random.randint', 'randint', (['(0)', '(1)'], {}), '(0, 1)\n', (1646, 1652), False, 'from random import randint\n'), ((1745, 1837), 'chess_player.Player.SearchPlayer', 'SearchPlayer', ([], {'search_depth': 'search_depth', 'max_children': 'max_children', 'scorer': 'self._scorer'}), '(search_depth=search_depth, max_children=max_children, scorer=\n self._scorer)\n', (1757, 1837), False, 'from chess_player.Player import SearchPlayer, RandomPlayer\n'), ((1877, 1891), 'chess_player.Player.RandomPlayer', 'RandomPlayer', ([], {}), '()\n', (1889, 1891), False, 'from chess_player.Player import SearchPlayer, RandomPlayer\n'), ((2038, 2052), 'chess_player.Player.RandomPlayer', 'RandomPlayer', ([], {}), '()\n', (2050, 2052), False, 'from chess_player.Player import SearchPlayer, RandomPlayer\n'), ((2080, 2172), 'chess_player.Player.SearchPlayer', 'SearchPlayer', ([], {'search_depth': 'search_depth', 'max_children': 'max_children', 'scorer': 'self._scorer'}), '(search_depth=search_depth, max_children=max_children, scorer=\n self._scorer)\n', (2092, 2172), False, 'from chess_player.Player import SearchPlayer, RandomPlayer\n'), ((974, 988), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (986, 988), False, 'from time import perf_counter\n'), ((1090, 1104), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', 
(1102, 1104), False, 'from time import perf_counter\n')]
|
# -*- coding: utf-8 -*-
from bouser.utils import as_json
__author__ = 'viruzzz-kun'
class Message(object):
    """A bus message carrying routing metadata and an arbitrary payload."""

    # Class-level defaults; merge_with_dict() overrides them per instance.
    immediate = True    # immediate message: queued, not written to the log
    secondary = False   # secondary message: not written to the log
    control = False     # control message: used to manage clients
    magic = None        # magic number used for RPC correlation
    topic = None        # message topic
    sender = None       # sender's user id
    recipient = None    # recipient's user id
    envelope = False    # batch of messages

    def __init__(self):
        self.tags = set()
        self.hops = []
        self.data = None

    def make_magic(self):
        """Assign a random 16-byte magic number for RPC correlation."""
        import os
        self.magic = os.urandom(16)

    def __json__(self):
        """Return a JSON-serializable dict representation of this message."""
        return {
            'i': bool(self.immediate),
            's': bool(self.secondary),
            'envelope': bool(self.envelope),
            'ctrl': self.control,
            'topic': self.topic,
            'magic': self.magic,
            'sender': self.sender,
            'recipient': self.recipient,
            'tags': sorted(self.tags),
            'data': self.data,
            'hops': self.hops,
        }

    @classmethod
    def from_json(cls, j):
        """Build a message from a decoded JSON dict *j*."""
        message = cls()
        message.merge_with_dict(j)
        return message

    def merge_with_dict(self, j):
        """Update this message in place from a decoded JSON dict *j*."""
        self.control = j.get('ctrl', False)
        self.magic = j.get('magic')
        self.topic = j.get('topic')
        self.sender = j.get('sender')
        self.recipient = j.get('recipient')
        self.tags = set(j.get('tags', ()))
        self.data = j.get('data')
        self.immediate = j.get('i', True)
        self.secondary = j.get('s', False)
        self.envelope = j.get('envelope', False)
        self.hops = j.get('hops', [])
|
[
"os.urandom"
] |
[((712, 726), 'os.urandom', 'os.urandom', (['(16)'], {}), '(16)\n', (722, 726), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
# Copyright (C) 2019 by <NAME>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby
# granted.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
from flask import current_app
from pycflare import CloudFlare as PyCloudFlare
from werkzeug.local import LocalProxy
# Proxy that resolves to the CloudFlare extension of the current Flask app.
cloudflare = LocalProxy(lambda: current_app.extensions['cloudflare'])
class CloudFlare(object):
    """Flask extension wrapping the pycflare client."""

    def __init__(self, app=None):
        self.app = app
        self.cf = None
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        """Create the pycflare client from the Flask app configuration."""
        auth_email = app.config.get('CLOUDFLARE_AUTH_EMAIL')
        auth_key = app.config.get('CLOUDFLARE_AUTH_KEY')
        redis_compat = app.config.get('CLOUDFLARE_ENABLE_REDIS_COMPATIBILITY', False)
        if auth_email is None or auth_key is None:
            raise RuntimeError("You must provide your CloudFlare AUTH_EMAIL and AUTH_KEY via CLOUDFLARE_AUTH_EMAIL and "
                               "CLOUDFLARE_AUTH_KEY in the app.config.")
        self.cf = PyCloudFlare(auth_email=auth_email, auth_key=auth_key, enable_redis_compatibility=redis_compat)
        app.extensions['cloudflare'] = self

    def register_account(self, account_id, name):
        return self.cf.register_account(account_id, name)

    def __getattr__(self, item):
        # Delegate unknown attribute lookups to the wrapped pycflare client.
        try:
            return self.cf.__getattribute__(item)
        except AttributeError:
            print("CloudFlare: Attribute {value} was not found.".format(value=item))
            return None
|
[
"pycflare.CloudFlare",
"werkzeug.local.LocalProxy"
] |
[((810, 867), 'werkzeug.local.LocalProxy', 'LocalProxy', (["(lambda : current_app.extensions['cloudflare'])"], {}), "(lambda : current_app.extensions['cloudflare'])\n", (820, 867), False, 'from werkzeug.local import LocalProxy\n'), ((1531, 1630), 'pycflare.CloudFlare', 'PyCloudFlare', ([], {'auth_email': 'auth_email', 'auth_key': 'auth_key', 'enable_redis_compatibility': 'redis_compat'}), '(auth_email=auth_email, auth_key=auth_key,\n enable_redis_compatibility=redis_compat)\n', (1543, 1630), True, 'from pycflare import CloudFlare as PyCloudFlare\n')]
|
from collections import OrderedDict
class Sample(object):
    """Hold sample data for a variant (e.g. genotype call, allele depths)."""

    # Class-level field registry shared by all Sample instances, i.e. all
    # individuals for a given variant, so they expose a common set of keys.
    fields = OrderedDict()

    @classmethod
    def set_format(cls, fields):
        """Reset the shared FORMAT keys from a colon-separated string."""
        cls.fields = OrderedDict()
        for key in fields.split(':'):
            if key != '':
                cls.fields[key] = None

    def __init__(self, sample):
        self.data = {}
        if sample == '.':
            # expand the missing marker to one '.' per expected field
            sample = ':'.join('.' * len(self.fields))
        values = sample.split(':')
        if len(values) != len(self.fields):
            raise ValueError('sample data should match expected fields')
        for key, value in zip(self.fields, values):
            self[key] = value

    def __str__(self):
        return ':'.join(str(self[key]) for key in self.fields)

    def keys(self):
        return list(self.fields)

    def __getitem__(self, key):
        if key not in self.fields:
            raise KeyError
        # a key only present in other samples falls back to the missing value
        return self.data.get(key, '.')

    def __setitem__(self, key, value):
        if key == '':  # a blank key is silently ignored
            return
        # newly seen keys are tracked for all samples
        if key not in self.fields:
            self.fields[key] = None
        self.data[key] = value

    def __contains__(self, key):
        return key in self.fields

    def __delitem__(self, key):
        # deleting a value resets it to the missing marker
        self.data[key] = '.'

    def __hash__(self):
        return hash(tuple(self[key] for key in self.fields))

    def __eq__(self, other):
        return self.fields == other.fields and hash(self) == hash(other)
class Samples(object):
    """Ordered collection of Sample objects sharing one FORMAT definition."""

    def __init__(self, fields, samples):
        Sample.set_format(fields)
        self.samples = [Sample(entry) for entry in samples]
        self.idx = -1

    def __str__(self):
        parts = [':'.join(Sample.fields)]
        parts.extend(str(sample) for sample in self.samples)
        return '\t'.join(parts)

    def __iter__(self):
        # NOTE: the object is its own (restarting) iterator; nested iteration
        # over the same instance is therefore not supported.
        return self

    def __next__(self):
        self.idx += 1
        if self.idx >= len(self):
            self.idx = -1
            raise StopIteration
        return self.samples[self.idx]

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        return self.samples[idx]
|
[
"collections.OrderedDict"
] |
[((341, 354), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (352, 354), False, 'from collections import OrderedDict\n'), ((434, 447), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (445, 447), False, 'from collections import OrderedDict\n')]
|
from flask import url_for
from tests import FrontendWithAdminTestBase
from tests.factories import RoomFactory, SubnetFactory, PatchPortFactory
class UserLogTestBase(FrontendWithAdminTestBase):
    def get_logs(self, user_id=None, **kw):
        """Fetch a user's logs as JSON and assert the response is valid.

        Without an explicit ``user_id`` the logs of the logged-in user are
        requested.  Asserts a 200 status code, a JSON content type and the
        presence of an ``"items"`` key.

        :returns: ``response.json['items']``
        """
        if user_id is None:
            user_id = self.user_id
        endpoint = url_for('user.user_show_logs_json',
                           user_id=user_id,
                           **kw)
        response = self.assert_response_code(endpoint, code=200)
        assert "json" in response.content_type.lower()
        payload = response.json
        assert payload.get('items') is not None
        return payload['items']
class UserFrontendTestBase(FrontendWithAdminTestBase):
    def create_factories(self):
        """Create a room with a patched switch port plus a subnet."""
        super().create_factories()
        self.room = RoomFactory()
        self.subnet = SubnetFactory()
        self.patch_port = PatchPortFactory(
            room=self.room,
            patched=True,
            switch_port__switch__host__owner=self.admin,
        )
        # provide a pool of default vlans so an IP can be found
        self.patch_port.switch_port.default_vlans.append(self.subnet.vlan)
|
[
"tests.factories.RoomFactory",
"flask.url_for",
"tests.factories.SubnetFactory",
"tests.factories.PatchPortFactory"
] |
[((721, 779), 'flask.url_for', 'url_for', (['"""user.user_show_logs_json"""'], {'user_id': 'user_id'}), "('user.user_show_logs_json', user_id=user_id, **kw)\n", (728, 779), False, 'from flask import url_for\n'), ((1213, 1226), 'tests.factories.RoomFactory', 'RoomFactory', ([], {}), '()\n', (1224, 1226), False, 'from tests.factories import RoomFactory, SubnetFactory, PatchPortFactory\n'), ((1249, 1264), 'tests.factories.SubnetFactory', 'SubnetFactory', ([], {}), '()\n', (1262, 1264), False, 'from tests.factories import RoomFactory, SubnetFactory, PatchPortFactory\n'), ((1291, 1386), 'tests.factories.PatchPortFactory', 'PatchPortFactory', ([], {'room': 'self.room', 'patched': '(True)', 'switch_port__switch__host__owner': 'self.admin'}), '(room=self.room, patched=True,\n switch_port__switch__host__owner=self.admin)\n', (1307, 1386), False, 'from tests.factories import RoomFactory, SubnetFactory, PatchPortFactory\n')]
|
"""
Set of unit test for SourceBase class
"""
import pytest
from mycroft_holmes.errors import MycroftSourceError
from mycroft_holmes.sources.base import SourceBase
from mycroft_holmes.sources import ConstSource
def test_get_sources_names():
    """The registry of known source names must contain the const source."""
    names = SourceBase.get_sources_names()
    print(names)
    assert 'common/const' in names
def test_new_from_name():
    """Instantiating by name yields a working ConstSource."""
    src = SourceBase.new_from_name('common/const')
    print(src)
    assert isinstance(src, ConstSource), 'ConstSource should be returned by SourceBase.new_from_name'
    assert src.get_value() == 1
def test_new_from_name_missing():
    """Asking for an unknown source name must raise MycroftSourceError."""
    with pytest.raises(MycroftSourceError):
        SourceBase.new_from_name('foo/missing-source')
def test_get_description():
    """The const source reports its name, short and long descriptions."""
    src = ConstSource()
    print(src.get_description())
    assert src.get_name() == 'common/const'
    assert src.get_short_description() == \
        'Returns a constant value (can be used to tweak a score of a feature).'
    assert src.get_description() == """
Returns a constant value (can be used to tweak a score of a feature).
#### `metrics` config
```yaml
metrics:
    - name: common/const
      weight: 100
```
    """.strip()
|
[
"pytest.raises",
"mycroft_holmes.sources.ConstSource",
"mycroft_holmes.sources.base.SourceBase.get_sources_names",
"mycroft_holmes.sources.base.SourceBase.new_from_name"
] |
[((258, 288), 'mycroft_holmes.sources.base.SourceBase.get_sources_names', 'SourceBase.get_sources_names', ([], {}), '()\n', (286, 288), False, 'from mycroft_holmes.sources.base import SourceBase\n'), ((387, 427), 'mycroft_holmes.sources.base.SourceBase.new_from_name', 'SourceBase.new_from_name', (['"""common/const"""'], {}), "('common/const')\n", (411, 427), False, 'from mycroft_holmes.sources.base import SourceBase\n'), ((765, 778), 'mycroft_holmes.sources.ConstSource', 'ConstSource', ([], {}), '()\n', (776, 778), False, 'from mycroft_holmes.sources import ConstSource\n'), ((632, 665), 'pytest.raises', 'pytest.raises', (['MycroftSourceError'], {}), '(MycroftSourceError)\n', (645, 665), False, 'import pytest\n'), ((675, 721), 'mycroft_holmes.sources.base.SourceBase.new_from_name', 'SourceBase.new_from_name', (['"""foo/missing-source"""'], {}), "('foo/missing-source')\n", (699, 721), False, 'from mycroft_holmes.sources.base import SourceBase\n')]
|
import tkinter
from .customtkinter_frame import CTkFrame
from .appearance_mode_tracker import AppearanceModeTracker
from .customtkinter_color_manager import CTkColorManager
class CTkProgressBar(tkinter.Frame):
    """Rounded (pill-shaped) progress bar with light/dark appearance support.

    The bar is drawn on an internal canvas from three items each for the
    border, the foreground track and the progress fill: a left oval, a
    center rectangle and a right oval.  Colors may be plain values or
    ``(light, dark)`` tuples selected by the current appearance mode.
    """

    def __init__(self,
                 bg_color=None,
                 border_color=CTkColorManager.PROGRESS_BG,
                 fg_color=CTkColorManager.PROGRESS_BG,
                 progress_color=CTkColorManager.MAIN,
                 width=160,
                 height=10,
                 border_width=0,
                 *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Re-render with the matching palette whenever the app-wide mode flips.
        AppearanceModeTracker.add(self.change_appearance_mode)

        if bg_color is None:
            # Inherit the parent's background so the rounded corners blend in.
            if isinstance(self.master, CTkFrame):
                self.bg_color = self.master.fg_color
            else:
                self.bg_color = self.master.cget("bg")
        else:
            self.bg_color = bg_color

        self.border_color = border_color
        self.fg_color = fg_color
        self.progress_color = progress_color

        self.appearance_mode = AppearanceModeTracker.get_mode()  # 0: "Light" 1: "Dark"

        self.width = width
        self.height = height
        self.border_width = border_width
        self.value = 0.5

        self.configure(width=self.width, height=self.height)

        # Bugfix: the option was misspelled "highlightthicknes" and only worked
        # because Tk accepts unambiguous option-name abbreviations.
        self.canvas = tkinter.Canvas(master=self,
                                     highlightthickness=0,
                                     width=self.width,
                                     height=self.height)
        self.canvas.place(x=0, y=0)

        self.border_parts = []
        self.fg_parts = []
        self.progress_parts = []

        self.draw()

    def draw(self):
        """Recreate the border and foreground canvas items and apply colors."""
        self.canvas.delete("all")
        self.border_parts = []
        self.fg_parts = []
        self.progress_parts = []

        # frame_border: left cap, body, right cap
        self.border_parts.append(self.canvas.create_oval(0, 0,
                                                         self.height, self.height))
        self.border_parts.append(self.canvas.create_rectangle(self.height/2, 0,
                                                              self.width-(self.height/2), self.height))
        self.border_parts.append(self.canvas.create_oval(self.width-self.height, 0,
                                                         self.width, self.height))

        # foreground track, inset from the border by border_width
        self.fg_parts.append(self.canvas.create_oval(self.border_width, self.border_width,
                                                     self.height-self.border_width, self.height-self.border_width))
        self.fg_parts.append(self.canvas.create_rectangle(self.height/2, self.border_width,
                                                          self.width-(self.height/2), self.height-self.border_width))
        self.fg_parts.append(self.canvas.create_oval(self.width-self.height+self.border_width, self.border_width,
                                                     self.width-self.border_width, self.height-self.border_width))

        # Tuple colors are (light, dark) pairs; pick by current appearance mode.
        if isinstance(self.bg_color, tuple):
            self.canvas.configure(bg=self.bg_color[self.appearance_mode])
        else:
            self.canvas.configure(bg=self.bg_color)

        for part in self.border_parts:
            if isinstance(self.border_color, tuple):
                self.canvas.itemconfig(part, fill=self.border_color[self.appearance_mode], width=0)
            else:
                self.canvas.itemconfig(part, fill=self.border_color, width=0)

        for part in self.fg_parts:
            if isinstance(self.fg_color, tuple):
                self.canvas.itemconfig(part, fill=self.fg_color[self.appearance_mode], width=0)
            else:
                self.canvas.itemconfig(part, fill=self.fg_color, width=0)

        self.set(self.value)

    def set(self, value):
        """Set the progress to *value*, clamped to [0, 1], and redraw the fill."""
        self.value = value

        if self.value > 1:
            self.value = 1
        elif self.value < 0:
            self.value = 0

        for part in self.progress_parts:
            self.canvas.delete(part)

        # progress fill: left cap, body sized by value, right cap
        self.progress_parts.append(self.canvas.create_oval(self.border_width,
                                                           self.border_width,
                                                           self.height - self.border_width,
                                                           self.height - self.border_width))
        self.progress_parts.append(self.canvas.create_rectangle(self.height / 2,
                                                                self.border_width,
                                                                self.height / 2 + (self.width - self.height) * self.value,
                                                                self.height - self.border_width))
        self.progress_parts.append(self.canvas.create_oval(self.height / 2 + (self.width - self.height) * self.value - (self.height) / 2 + self.border_width,
                                                           self.border_width,
                                                           self.height / 2 + (self.width - self.height) * self.value + (self.height) / 2 - self.border_width,
                                                           self.height - self.border_width))

        for part in self.progress_parts:
            if isinstance(self.progress_color, tuple):
                self.canvas.itemconfig(part, fill=self.progress_color[self.appearance_mode], width=0)
            else:
                self.canvas.itemconfig(part, fill=self.progress_color, width=0)

        self.canvas.update()
        self.canvas.update_idletasks()

    def change_appearance_mode(self, mode_string):
        """AppearanceModeTracker callback: switch palette and re-draw."""
        if mode_string.lower() == "dark":
            self.appearance_mode = 1
        elif mode_string.lower() == "light":
            self.appearance_mode = 0

        # The parent's background may differ between modes; re-resolve it.
        if isinstance(self.master, CTkFrame):
            self.bg_color = self.master.fg_color
        else:
            self.bg_color = self.master.cget("bg")

        self.draw()
|
[
"tkinter.Canvas"
] |
[((1339, 1430), 'tkinter.Canvas', 'tkinter.Canvas', ([], {'master': 'self', 'highlightthicknes': '(0)', 'width': 'self.width', 'height': 'self.height'}), '(master=self, highlightthicknes=0, width=self.width, height=\n self.height)\n', (1353, 1430), False, 'import tkinter\n')]
|
"""
This is a sample stub of loadgen with multiple processes support.
Each process sets its affinity by a proc list.
Loadgen is a producer, which calls issue_queries(). issue_queries() gets query
from loadgen and puts query id/sample indices into an input queue.
Each Consumer(process)'s run() reads input queue, calls model_predict() to get
inference result, and put result into output queue.
A standalone thread's response_loadgen() reads output queue, and responds
inference result to loadgen.
Server and Offline scenario PerformanceOnly mode are verified.
Each Model needs to implement below
model_predict()
load_query_samples()
unload_query_samples()
For model_predict(), how to return data to loadgen is model specific, the
loadgen CPP API requires a data pointer and length, then it saves the data to
mlperf_log_accuracy.json, which is used to generate accuracy number offline.
"""
import multiprocessing
import threading
import subprocess
import time
import os
import sys
import argparse
import array
import logging
import numpy as np
import mlperf_loadgen as lg
from collections import defaultdict
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("MXNet-BERT")
# Defaults; both are overwritten in main() from --num-phy-cpus / --num-instance.
num_cpus = 28
num_ins = 2
# Unit-conversion constants (nanoseconds and milliseconds per second).
NANO_SEC = 1e9
MILLI_SEC = 1000
# Global counters of queries entering / leaving the worker queues.
in_queue_cnt = 0
out_queue_cnt = 0
# Batch-size increment used during performance calibration.
bs_step = 8
def get_args():
    """Parse and return the benchmark's command-line arguments."""
    cli = argparse.ArgumentParser()
    cli.add_argument("--scenario", choices=["Offline", "Server"], default="Offline", help="Scenario")
    cli.add_argument("--batching", choices=["Fixed", "Dynamic", "Adaptive"], default="Adaptive", help="Batching method")
    cli.add_argument("--batch-size", default=1, type=int, help="batch_size")
    cli.add_argument("--num-instance", default=2, type=int, help="number of instance")
    cli.add_argument("--num-phy-cpus", default=28, type=int, help="number of physical cpus")
    cli.add_argument("--vocab", default='converted_from_tf_to_mxnet/tf.vocab', type=str, help="vocab file path")
    cli.add_argument("--params", default='converted_from_tf_to_mxnet/tf_fp32.params', type=str, help="FP32 params path")
    cli.add_argument("--quantized_model_prefix",
                     default='converted_from_tf_to_mxnet/quantized_models/model_bert_squad_quantized_customize',
                     type=str, help="quantized model prefix")
    cli.add_argument("--accuracy", action="store_true", help="enable accuracy pass")
    cli.add_argument("--quantized", action="store_true", help="use quantized model")
    cli.add_argument("--mlperf-conf", default="mlperf.conf", help="mlperf rules config")
    cli.add_argument("--user-conf", default="user.conf", help="user rules config")
    cli.add_argument("--perf-count", default=None, help="perf count")
    cli.add_argument("--profile", action="store_true", help="whether enable profiler")
    cli.add_argument("--warmup", action="store_true", help="whether do warmup")
    cli.add_argument("--perf_calibrate", action="store_true", help="whether do performance calibration")
    return cli.parse_args()
# Map CLI scenario names to loadgen's TestScenario enum values.
scenario_map = {
    "Offline": lg.TestScenario.Offline,
    "Server": lg.TestScenario.Server,
}
def load_query_samples(sample_list):
    """Loadgen QSL callback to preload samples; model-specific placeholder (no-op here)."""
def unload_query_samples(sample_list):
    """Loadgen QSL callback to release samples; model-specific placeholder (no-op here)."""
def block_until(counter, num_ins, t=1):
    """Poll *counter* (a shared Value-like object) until it reaches *num_ins*,
    sleeping *t* seconds between checks."""
    while True:
        if counter.value >= num_ins:
            return
        time.sleep(t)
# Per-sequence-length profile tables; lazily filled by load_perf_prof().
batches = None
def load_perf_prof():
    """Load the offline calibration profile (prof.py) and derive, for every
    profiled sequence length, the batch size with the best throughput."""
    global batches
    global throughputs
    # load performance profile map for offline scenario
    if os.path.exists("prof.py"):
        from prof import prof_map
        from prof import prof_bs_step
    else:
        prof_map = {}
        prof_bs_step = 1
        # NOTE(review): this early return leaves `batches`/`throughputs` unset
        # (batches stays None), so a later get_best_bs() call will fail —
        # confirm prof.py is required for the Adaptive/Dynamic paths.
        return
    # Size the lookup tables to the longest profiled sequence length.
    longest_seq = 0
    for k, v in sorted(prof_map.items()):
        if k > longest_seq:
            longest_seq = k
    batches = [0.0] * (longest_seq+1)
    throughputs = [0.0] * (longest_seq+1)
    # prof_map[k][i] holds the measured latency for batch size i * prof_bs_step;
    # pick the batch size that maximizes throughput (batch / latency).
    for k, v in sorted(prof_map.items()):
        max_throughput = 0.0
        max_bs = 0
        for i in range(1, len(v)):
            current_bs = i * prof_bs_step
            if current_bs/v[i] > max_throughput:
                max_throughput = current_bs/v[i]
                max_bs = current_bs
        batches[k] = max_bs
        throughputs[k] = max_throughput
def get_best_bs(seq_len):
    """Return (padded_len, batch_size, throughput) for the most efficient
    padding target at or above *seq_len*, according to the calibration
    profile (scanning lengths up to 384)."""
    global batches
    if batches is None:
        load_perf_prof()
    global throughputs
    # Advance to the first length that actually has a profiled batch size.
    while batches[seq_len] == 0:
        seq_len += 1
    best_len, best_bs, best_tp = seq_len, batches[seq_len], throughputs[seq_len]
    # Any longer padding target with better measured throughput wins.
    for cand in range(seq_len + 1, 385):
        if throughputs[cand] > best_tp:
            best_len, best_bs, best_tp = cand, batches[cand], throughputs[cand]
    return best_len, best_bs, best_tp
class Consumer(multiprocessing.Process):
    """Worker process: pulls Input batches from task_queue, runs BERT inference
    with MXNet on a pinned slice of CPU cores, and puts Output results on
    result_queue.  Also implements optional warmup and offline calibration."""
    def __init__(self, task_queue, result_queue, lock, init_counter, calibrate_counter, proc_idx, world_size, args, max_pad_len=384):
        """Store queues/counters and compute this instance's core range from
        the module-level num_cpus / num_ins split."""
        multiprocessing.Process.__init__(self)
        global num_ins
        self.task_queue = task_queue
        self.result_queue = result_queue
        self.lock = lock
        self.init_counter = init_counter
        self.calibrate_counter = calibrate_counter
        self.proc_idx = proc_idx
        self.world_size = world_size
        self.args = args
        # Contiguous core slice for this instance (also expressed as start/end below).
        self.affinity = range(round(proc_idx * num_cpus / num_ins),
                              round((proc_idx + 1) * num_cpus / num_ins))
        self.start_core_idx = proc_idx * num_cpus // num_ins
        self.end_core_idx = (proc_idx + 1) * num_cpus // num_ins - 1
        # Sequence lengths already seen (warmup/calibration de-duplication).
        self.length_list = {}
        # seq_len -> list of per-batch-size latencies (calibration output).
        self.length_time_list = {}
        self.max_pad_len = max_pad_len
    def warmup(self, model, data_set, context, scenario):
        """Run one inference per distinct sequence length (padded to the best
        batch shape in Offline mode) so MXNet primitives are pre-compiled."""
        if self.proc_idx == 0:
            print ('Start warmup...')
        data_size = len(data_set.eval_features)
        count = 0
        import mxnet as mx
        for start in range(0, data_size):
            inputs_list = []
            token_types_list = []
            valid_length_list = []
            eval_feature = data_set.eval_features[start]
            _, inputs, token_types, valid_length, _, _ = eval_feature
            # Only warm up each distinct sequence length once.
            if len(inputs) in self.length_list:
                continue
            self.length_list[len(inputs)] = True
            max_throughput = 0.0
            best_bs = 0
            if scenario == 'Offline':
                # only support warmup of adaptive batching
                best_len, best_bs, _ = get_best_bs(len(inputs))
                if best_len in self.length_list:
                    continue
                self.length_list[best_len] = True
                # Pad to the chosen target length and replicate to the best batch size.
                inputs += [0] * (best_len - len(inputs))
                token_types += [0] * (best_len - len(token_types))
                for i in range(best_bs):
                    inputs_list.append(inputs)
                    token_types_list.append(token_types)
                    valid_length_list.append(valid_length)
                if self.proc_idx == 0:
                    print ("warmup seqlen {} batchsize {}".format(best_len, best_bs))
            else:
                inputs_list.append(inputs)
                token_types_list.append(token_types)
                valid_length_list.append(valid_length)
            inputs_nd = mx.nd.array(inputs_list).as_in_context(context)
            token_types_nd = mx.nd.array(token_types_list).as_in_context(context)
            valid_length_nd = mx.nd.array(valid_length_list).as_in_context(context).astype('float32')
            # warm up primitive once
            out = model.net(inputs_nd, token_types_nd, valid_length_nd)
            out_np = out.asnumpy()
            count += 1
            if count % 10 == 0 and self.proc_idx == 0:
                print ('Warmup {} samples'.format(count))
        if self.proc_idx == 0:
            print ('Warmup done')
    def calibrate(self, model, data_set, context):
        """Measure latency at increasing batch sizes (steps of bs_step, up to
        256) for each distinct sequence length, sharing the work across
        instances round-robin, and append the results to prof_new.py."""
        if self.proc_idx == 0:
            print ('Start calibration...')
        data_size = len(data_set.eval_features)
        count = 0
        global bs_step
        import mxnet as mx
        for start in range(0, data_size):
            inputs_list = []
            token_types_list = []
            valid_length_list = []
            eval_feature = data_set.eval_features[start]
            _, inputs, token_types, valid_length, _, _ = eval_feature
            cur_len = len(inputs)
            # Calibrate each distinct sequence length once.
            if cur_len in self.length_list:
                continue
            self.length_list[cur_len] = True
            # Round-robin: each instance handles every world_size-th length.
            if count % self.world_size != self.proc_idx:
                count += 1
                continue
            count += 1
            length_time_list = []
            length_time_list.append(0)
            max_throughput = 0.0
            best_bs = 0
            max_len = len(inputs)
            while True:
                # Grow the batch by bs_step replicas of the same sample.
                for i in range(bs_step):
                    inputs_list.append(inputs)
                    token_types_list.append(token_types)
                    valid_length_list.append(valid_length)
                inputs_nd = mx.nd.array(inputs_list).as_in_context(context)
                token_types_nd = mx.nd.array(token_types_list).as_in_context(context)
                valid_length_nd = mx.nd.array(valid_length_list).as_in_context(context).astype('float32')
                # warm up primitive once
                out = model.net(inputs_nd, token_types_nd, valid_length_nd)
                out_np = out.asnumpy()
                # measure time for the batch
                t0 = time.time()
                for i in range(8):
                    out = model.net(inputs_nd, token_types_nd, valid_length_nd)
                    out_np = out.asnumpy()
                t1 = time.time()
                duration = (t1 - t0)/8.0
                throughput = len(inputs_list)/duration
                if throughput > max_throughput:
                    max_throughput = throughput
                    best_bs = len(inputs_list)
                if len(inputs_list) >= 256:
                    print ("{} - Best efficiency for seq len {} is BS {} with seq/s {:.5}".format(
                           self.proc_idx, max_len, best_bs, max_throughput))
                    break
                #print ("{} - Best efficiency for seq len {} is BS {} with seq/s {:.5}, current BS {} seq/s {:.5}\r".format(
                #       self.proc_idx, max_len, best_bs, max_throughput, len(inputs_list), throughput), end='')
                length_time_list.append(duration)
            self.length_time_list[cur_len] = length_time_list
        # Append this instance's measurements in prof.py's dict-literal format.
        with open('prof_new.py', 'a') as f:
            for k, v in sorted(self.length_time_list.items()):
                print (' {} : {},'.format(k, v), file=f)
        # keep the processor hot until all instance done calibration
        print ('Calibrate almost done, keep instance hot')
        self.lock.acquire()
        self.calibrate_counter.value += 1
        self.lock.release()
        while self.calibrate_counter.value < 2 * self.world_size:
            out = model.net(inputs_nd, token_types_nd, valid_length_nd)
            out_np = out.asnumpy()
        print ('Calibrate done')
    def run(self):
        """Process entry point: pin cores, load the model, optionally
        calibrate/warm up, then serve batches from task_queue until a None
        sentinel arrives."""
        global batching
        #os.sched_setaffinity(self.pid, self.affinity)
        # Pin this process to its core slice via taskset.
        cmd = "taskset -p -c %d-%d %d" % (self.start_core_idx, self.end_core_idx, self.pid)
        print (cmd)
        os.system(cmd)
        import mxnet as mx
        ctx = mx.cpu()
        #from numexpr.utils import set_num_threads
        #set_num_threads(28)
        os.environ['OMP_NUM_THREADS'] = '{}'.format(self.end_core_idx-self.start_core_idx+1)
        model = BERTModel(mx.cpu(), self.args.vocab, self.args.params,
                          self.args.quantized, self.args.quantized_model_prefix)
        data_set = BERTDataSet(self.args.vocab, self.args.perf_count)
        # Rendezvous with the other instances before (optional) calibration.
        self.lock.acquire()
        self.calibrate_counter.value += 1
        self.lock.release()
        block_until(self.calibrate_counter, self.world_size)
        if self.args.perf_calibrate:
            self.calibrate(model, data_set, ctx)
            return
        self.lock.acquire()
        self.calibrate_counter.value += 1
        self.lock.release()
        if self.args.warmup:
            self.warmup(model, data_set, ctx, self.args.scenario)
        # Signal readiness to the main process.
        self.lock.acquire()
        self.init_counter.value += 1
        self.lock.release()
        #affinity = os.sched_getaffinity(self.pid)
        #print('Process', self.pid, 'affinity proc list:', affinity)
        cur_step = 0
        start_step = 384
        end_step = -1
        from utils import profile
        while True:
            next_task = self.task_queue.get() #(self.proc_idx)
            if next_task is None:
                # None means shutdown
                log.info('Exiting {}-pid:{}, cur_step={}'.format(self.name, self.pid, cur_step))
                self.task_queue.task_done()
                if self.args.profile and self.proc_idx==0:
                    if end_step == -1:
                        end_step = cur_step
                    profile(cur_step, start_step, end_step, profile_name='profile_{}.json'.format(self.pid), early_exit=False)
                break
            query_id_list = next_task.query_id_list
            sample_index_list = next_task.sample_index_list
            batch_size = len(sample_index_list)
            #print ('pid-{}, query_id_list: {}, sample_index_list: {}'.format(self.pid, query_id_list, sample_index_list))
            inputs_list = []
            token_types_list = []
            valid_length_list = []
            for sample_index in sample_index_list:
                eval_feature = data_set.eval_features[sample_index]
                _, inputs, token_types, valid_length, _, _ = eval_feature
                inputs_list.append(inputs)
                token_types_list.append(token_types)
                valid_length_list.append(valid_length)
            # Pick a padding target: for multi-sample batches, use the profile's
            # best length when the batch size matches; single samples pad to max_pad_len.
            if len(inputs_list) > 1:
                max_len = max([len(inp) for inp in inputs_list])
                new_max_len, bs, best_throughput = get_best_bs(max_len)
                if bs == len(inputs_list):
                    max_len = new_max_len
                #for i in range(len(inputs_list)):
                #    inputs_list[i] += [0] * (max_len - len(inputs_list[i]))
                #    token_types_list[i] += [0] * (max_len - len(token_types_list[i]))
            else:
                max_len = self.max_pad_len #len(inputs_list[0]) #self.max_pad_len #len(inputs_list)
            # Zero-pad every sample in the batch to the common target length.
            for i in range(len(inputs_list)):
                inputs_list[i] += [0] * (max_len - len(inputs_list[i]))
                token_types_list[i] += [0] * (max_len - len(token_types_list[i]))
            inputs = mx.nd.array(inputs_list).as_in_context(ctx)
            token_types = mx.nd.array(token_types_list).as_in_context(ctx)
            valid_length = mx.nd.array(valid_length_list).as_in_context(ctx).astype('float32')
            if self.args.profile and self.proc_idx==0:
                profile(cur_step, start_step, end_step, profile_name='profile_{}.json'.format(self.pid), early_exit=False)
                cur_step += 1
            #t0 = time.time()
            out = model.net(inputs, token_types, valid_length)
            out_np = out.asnumpy()
            #t1 = time.time()
            #if self.proc_idx == 0:
            #    cur_throughput = len(inputs_list)/(t1-t0)
            #    if best_throughput != 0:
            #        throughput_diff = (cur_throughput - best_throughput) / best_throughput
            #        print ('inference seq len = {} BS = {} throughput = {:.5f} ({:.3f}%)'.format(max_len, len(inputs_list), cur_throughput, throughput_diff*100))
            #    else:
            #        print ('inference seq len = {} BS = {} throughput = {:.5f})'.format(max_len, len(inputs_list), cur_throughput))
            result = Output(query_id_list, out_np)
            self.result_queue.put(result)
            #print('consumer-{}: output.shape={}, query_id={}'.format(self.pid, out_np.shape, query_id_list[0]))
            self.task_queue.task_done()
class Input(object):
    """A batch of loadgen queries handed to a worker process."""

    def __init__(self, id_list, index_list, sample_length_list):
        # All three arguments must be parallel lists.
        for arg in (id_list, index_list, sample_length_list):
            assert isinstance(arg, list)
        assert len(id_list) == len(index_list)
        self.query_id_list = id_list
        self.sample_index_list = index_list
        self.sample_length_list = sample_length_list
class Output(object):
    """Inference result for one batch, tagged with its loadgen query ids."""

    def __init__(self, query_id_list, result):
        self.query_id_list = query_id_list
        self.result = result
class InQueue():
    """Batches incoming loadgen query samples and feeds them into the shared
    input queue, using the module-level `batching` strategy (Fixed, Dynamic
    or Adaptive) for multi-sample Offline bursts."""
    def __init__(self, in_queue, batch_size, data_set):
        from preprocessing_utils import max_seq_length
        self.in_queue = in_queue
        self.batch_size = batch_size
        # Accumulators for building a batch out of single-sample puts.
        self.query_id_list = []
        self.sample_index_list = []
        self.sample_length_list = []
        self.index = 0
        self.data_set = data_set
        self.max_seq_len = max_seq_length
    def put(self, query_samples):
        """Enqueue *query_samples*: single samples are accumulated up to
        batch_size; multi-sample bursts are sorted by length (descending)
        and split into batches per the configured batching strategy."""
        global in_queue_cnt
        ##TODO, debug
        idx = [q.index for q in query_samples]
        query_id = [q.id for q in query_samples]
        query_len = len(query_samples)
        num_samples = len(query_samples)
        def idx_len(e):
            # Tokenized input length of the sample a query refers to.
            idx = e.index
            feature = self.data_set.eval_features[idx]
            _, inputs, _, _, _, _ = feature
            return len(inputs)
        if num_samples == 1:
            if self.batch_size == 1:
                in_queue_cnt += 1
                self.in_queue.put(Input([query_samples[0].id],
                                  [query_samples[0].index],
                                  [idx_len(query_samples[0])]))
            else:
                # Accumulate until a full batch is collected, then enqueue it.
                self.index += 1
                if self.index < self.batch_size:
                    self.query_id_list.append(query_samples[0].id)
                    self.sample_index_list.append(query_samples[0].index)
                    self.sample_length_list.append(idx_len(query_samples[0]))
                else:
                    self.query_id_list.append(query_samples[0].id)
                    self.sample_index_list.append(query_samples[0].index)
                    self.sample_length_list.append(idx_len(query_samples[0]))
                    self.in_queue.put(Input(self.query_id_list, self.sample_index_list, self.sample_length_list))
                    in_queue_cnt += self.batch_size
                    self.index = 0
                    self.query_id_list = []
                    self.sample_index_list = []
                    self.sample_length_list = []
        else:
            # Sort longest-first so batches contain similar-length samples.
            query_samples.sort(key=idx_len, reverse=True)
            def enqueue_batch(cur_batch_size, base_index=0):
                # Enqueue query_samples[base_index : base_index+cur_batch_size] as one Input.
                global in_queue_cnt
                id_list = []
                index_list = []
                length_list = []
                for i in range(cur_batch_size):
                    id_list.append(query_samples[base_index + i].id)
                    index_list.append(query_samples[base_index + i].index)
                    length_list.append(idx_len(query_samples[base_index + i]))
                self.in_queue.put(Input(id_list, index_list, length_list))
                in_queue_cnt += cur_batch_size
            global batching
            # true_total_len: sum of real lengths; total_len: padded volume (for pad-ratio stats).
            true_total_len = 0
            total_len = 0
            for i in range(num_samples):
                true_total_len += idx_len(query_samples[i])
            if batching == 'Dynamic':
                # Grow each batch until its padded volume (longest-len * count)
                # would exceed batch_size * max_seq_len.
                batch_seq_len = self.batch_size * self.max_seq_len
                base_index = 0
                num_batches = 0
                while base_index < num_samples:
                    base_len = idx_len(query_samples[base_index])
                    for i in range(base_index, num_samples):
                        current_len = base_len * (i-base_index+1)
                        if i+1 < num_samples:
                            next_len = base_len * (i+1-base_index+1)
                            if next_len > batch_seq_len:
                                # Choose the split closer to the volume budget.
                                if next_len - batch_seq_len > batch_seq_len - current_len:
                                    next_index = i+1
                                else:
                                    next_index = i+2
                                break
                        else:
                            next_index = i+1
                            break
                    total_len += base_len * (next_index-base_index)
                    enqueue_batch(next_index-base_index, base_index)
                    num_batches += 1
                    #print('pid-{2}: enqueue bs={0} and input volume {1}...'
                    #    .format(next_index-base_index, current_len, os.getpid()))
                    base_index = next_index
                print('pid-{1}: enqueued {0} batches, pad ratio = {2}%'
                    .format(num_batches, os.getpid(), (total_len-true_total_len)*100/true_total_len))
            elif batching == 'Adaptive':
                # Use the calibration profile's best batch size for the batch's longest sample.
                batch_seq_len = self.batch_size * self.max_seq_len
                base_index = 0
                num_batches = 0
                while base_index < num_samples:
                    base_len = idx_len(query_samples[base_index])
                    best_len, best_bs, _ = get_best_bs(base_len)
                    next_index = base_index + best_bs
                    if next_index > num_samples:
                        next_index = num_samples
                    total_len += base_len * (next_index-base_index)
                    enqueue_batch(next_index-base_index, base_index)
                    num_batches += 1
                    #print('pid-{2}: enqueue bs={0} and input volume {1}...'
                    #    .format(next_index-base_index, current_len, os.getpid()))
                    base_index = next_index
                print('pid-{1}: enqueued {0} batches, pad ratio = {2}%'
                    .format(num_batches, os.getpid(), (total_len-true_total_len)*100/true_total_len))
            else:
                # Fixed batching: equal-size batches plus one remainder batch.
                num_batch = num_samples // self.batch_size
                remaining_batch = num_samples % self.batch_size
                ## TODO, remove
                print('pid-{3}: split the datasets into {0} batches with bs={1} and remaining {2}...'
                      .format(num_batch, self.batch_size, remaining_batch, os.getpid()))
                for b in range(num_batch):
                    base_index = b * self.batch_size
                    enqueue_batch(self.batch_size, base_index)
                if remaining_batch > 0:
                    base_index = num_batch * self.batch_size
                    enqueue_batch(remaining_batch, base_index)
            #print ('in_queue_cnt=', in_queue_cnt)
class InQueueServer():
    """Server-scenario input router: assigns each single-sample query to a
    sequence-length bucket, accumulates per-bucket batches, and flushes any
    partial batches once the expected number of queries has arrived."""

    def __init__(self, in_queue, batch_sizes, data_set, expected_total_queries):
        from preprocessing_utils import max_seq_length
        self.in_queues = in_queue
        # Bucket cutoff -> batch size.  Seed a defaultdict with the configured
        # sizes (missing buckets read as 0).  Bugfix: the original re-assigned
        # self.batch_sizes = defaultdict(int) at the end of __init__, wiping
        # out the configured batch sizes entirely.
        self.batch_sizes = defaultdict(int, batch_sizes)
        # Per-bucket accumulators for building batches.
        self.query_id_lists = defaultdict(list)
        self.sample_index_lists = defaultdict(list)
        self.indexes = defaultdict(int)
        self.sample_length_lists = defaultdict(list)
        self.data_set = data_set
        self.max_seq_len = max_seq_length
        self.num_buckets = len(in_queue)
        self.cutoffs = sorted(list(batch_sizes.keys()))
        self.expected_total_queries = expected_total_queries

    def getQueryBucket(self, query_len):
        """Return the smallest cutoff >= *query_len* (clamped to the last bucket)."""
        end = 0
        while end < self.num_buckets and query_len > self.cutoffs[end]:
            end += 1
        # Clamp: a query longer than every cutoff goes to the largest bucket
        # instead of raising IndexError.
        return self.cutoffs[min(end, self.num_buckets - 1)]

    def getQuerySampleLength(self, query):
        """Tokenized input-sequence length of the sample *query* refers to."""
        idx = query.index
        return len(self.data_set.eval_features[idx][1])  # input sequence is the 2nd attribute per ex.

    def put(self, query_samples):
        """Route *query_samples* (expected one at a time in Server mode) into
        the matching bucket queue, enqueueing a batch when one fills up and
        flushing all partial batches after the last expected query."""
        global in_queue_cnt
        global queries_so_far  # Track no. of queries received from loadgen
        num_samples = len(query_samples)
        # Bugfix: queries_so_far was declared global but never updated, so the
        # end-of-run flush below could never trigger.
        queries_so_far += num_samples

        if num_samples == 1:
            # Use length of the query sample to determine the queue it should be put
            q_length = self.getQuerySampleLength(query_samples[0])
            bucket = self.getQueryBucket(q_length)

            if self.batch_sizes[bucket] == 1:
                in_queue_cnt += 1
                # Bugfix: original referenced undefined name ``q_len`` here.
                self.in_queues[bucket].put(Input([query_samples[0].id], [query_samples[0].index], [q_length]))
            else:
                self.indexes[bucket] += 1
                if self.indexes[bucket] < self.batch_sizes[bucket]:
                    self.query_id_lists[bucket].append(query_samples[0].id)
                    self.sample_index_lists[bucket].append(query_samples[0].index)
                    # Bugfix: original wrote to self.sample_length__lists (typo).
                    self.sample_length_lists[bucket].append(q_length)
                else:
                    self.query_id_lists[bucket].append(query_samples[0].id)
                    self.sample_index_lists[bucket].append(query_samples[0].index)
                    self.sample_length_lists[bucket].append(q_length)
                    self.in_queues[bucket].put(Input(self.query_id_lists[bucket], self.sample_index_lists[bucket], self.sample_length_lists[bucket]))
                    in_queue_cnt += self.batch_sizes[bucket]
                    self.indexes[bucket] = 0
                    self.query_id_lists[bucket] = []
                    self.sample_index_lists[bucket] = []
                    self.sample_length_lists[bucket] = []

        if queries_so_far == self.expected_total_queries:
            # Last expected query arrived: flush partially-filled batches as
            # single-sample Inputs so nothing is left stranded.
            for bucket in self.in_queues:
                query_id_list = self.query_id_lists[bucket]
                sample_index_list = self.sample_index_lists[bucket]
                sample_length_list = self.sample_length_lists[bucket]
                for j, q_id in enumerate(query_id_list):
                    s_idx = sample_index_list[j]
                    s_len = sample_length_list[j]
                    self.in_queues[bucket].put(Input([q_id], [s_idx], [s_len]))
                    in_queue_cnt += 1
def flush_queries():
    """Loadgen callback; nothing is buffered on our side, so this is a no-op."""
def process_latencies(latencies_ns):
    """Loadgen callback: log mean, median and p90 latency in milliseconds."""
    stats = (
        ("Average latency (ms) per query:", np.mean(latencies_ns)),
        ("Median latency (ms): ", np.percentile(latencies_ns, 50)),
        ("90 percentile latency (ms): ", np.percentile(latencies_ns, 90)),
    )
    for label, value_ns in stats:
        log.info(label)
        log.info(value_ns / 1000000.0)
def response_loadgen(out_queue):
    """Thread target: drain Output objects from *out_queue* and report each
    sample's result back to loadgen, until a None sentinel arrives."""
    global out_queue_cnt
    while True:
        next_task = out_queue.get()
        if next_task is None:
            # None means shutdown
            log.info('Exiting response thread')
            break

        query_id_list = next_task.query_id_list
        result = next_task.result
        batch_size = len(query_id_list)
        # NOTE(review): reshape's return value is discarded; numpy reshape is
        # not in-place, so this line has no effect — presumably the split
        # below is what actually matters.  Confirm against result's shape.
        result.reshape(batch_size, -1, 2)
        # One sub-array per sample in the batch.
        out_list = np.split(result, batch_size, axis=0)

        #responses = []
        for i, o in enumerate(out_list):
            # Hand loadgen a pointer/length pair over the float32 bytes.
            response_array = array.array("B", np.array(o).astype(np.float32).tobytes())
            bi = response_array.buffer_info()
            #responses.append(lg.QuerySampleResponse(query_id_list[i], bi[0], bi[1]))
            responses = [lg.QuerySampleResponse(query_id_list[i], bi[0], bi[1])]
            out_queue_cnt += 1
            #print('Response loadgen ({}), query_id {}, out_queue_cnt {}'.format(os.getpid(), query_id_list[i], out_queue_cnt))
            # Completes one sample at a time (batched completion is commented out).
            lg.QuerySamplesComplete(responses)
        #lg.QuerySamplesComplete(responses)
class BERTModel():
    """Wraps the MXNet BERT QA network; loads either a quantized symbol/params
    pair or the FP32 gluonnlp model, exposing it as ``self.net``."""
    def __init__(self, ctx, mx_vocab, params, quantized, quantized_model_prefix):
        """Load the network onto *ctx*.

        ctx -- MXNet context; mx_vocab -- vocab JSON path; params -- FP32
        parameter file; quantized -- choose the quantized model;
        quantized_model_prefix -- symbol/params prefix for the quantized model.
        """
        import gluonnlp as nlp
        from utils import BertForQA
        import mxnet as mx
        if quantized:
            log.info('Loading quantized MXNet model...')
            # Import the pre-quantized symbolic graph and its parameters.
            self.net = mx.gluon.SymbolBlock.imports('{}-symbol.json'.format(quantized_model_prefix),
                                                    ['data0', 'data1', 'data2'],
                                                    '{}-0000.params'.format(quantized_model_prefix))
            self.net.hybridize(static_alloc=True, static_shape=True)
        else:
            log.info('Loading MXNet model...')
            with open(mx_vocab, 'r') as f:
                vocab = nlp.vocab.BERTVocab.from_json(f.read())

            # Build the BERT-large backbone without pooler/decoder/classifier heads.
            bert, vocab = nlp.model.get_model(
                name='bert_24_1024_16',
                dataset_name=None,
                vocab=vocab,
                pretrained=False,
                ctx=ctx,
                use_pooler=False,
                use_decoder=False,
                use_classifier=False)
            self.net = BertForQA(bert=bert)
            nlp.utils.load_parameters(self.net, params, ctx=ctx, cast_dtype=True)
            self.net.hybridize(static_alloc=True)
class BERTDataSet():
    """Loads SQuAD v1.1 dev data and preprocesses it into BERT input features
    (stored in ``self.eval_features``)."""
    def __init__(self, mx_vocab, perf_count):
        """mx_vocab -- vocab JSON path; perf_count -- number of samples for
        performance mode (defaults to the full dataset when None)."""
        import gluonnlp as nlp
        from preprocessing_utils import preprocess_dataset, max_seq_length, max_query_length, doc_stride
        from gluonnlp.data import SQuAD
        eval_features = []
        with open(mx_vocab, 'r') as f:
            vocab = nlp.vocab.BERTVocab.from_json(f.read())

        log.info("Creating tokenizer...")
        tokenizer = nlp.data.BERTTokenizer(vocab=vocab, lower=True)
        round_to = None

        log.info("Reading examples...")
        # Dataset is downloaded/cached under ./build/data.
        dev_path = os.path.join(os.getcwd(), 'build/data')
        dev_data = SQuAD('dev', version='1.1', root=dev_path)
        dev_data_transform = preprocess_dataset(tokenizer,
                                                dev_data,
                                                max_seq_length=max_seq_length,
                                                doc_stride=doc_stride,
                                                max_query_length=max_query_length,
                                                input_features=True)

        self.eval_features = dev_data_transform
        self.count = len(self.eval_features)
        self.perf_count = perf_count if perf_count is not None else self.count
class MultiprocessShapeBasedQueue(object):
    """Joinable work queue shared by all Consumer processes.

    Currently a thin wrapper around multiprocessing.JoinableQueue; the
    commented-out code below is an unused shape-aware routing scheme that
    tried to keep each (batch_size, seq_len) shape on a single instance.
    """
    def __init__(self):
        global num_ins
        self._jq = multiprocessing.JoinableQueue()
        # Per-instance side queues and shared dicts only used by the disabled
        # shape-routing logic below.
        self._instances_queue = [multiprocessing.Queue() for _ in range(num_ins)]
        self._manager = multiprocessing.Manager()
        self.shape_in_instance = self._manager.dict()
        self.finish_status = self._manager.dict()

    def get(self, instance_id=0):
        # instance_id is accepted for the disabled routing scheme; plain FIFO get.
        return self._jq.get()
        # with multiprocessing.Lock():
        #     if self._instances_queue[instance_id].empty():
        #         while True:
        #             item = self._jq.get()
        #             if item != None:
        #                 sample_length = item.sample_length_list[0]
        #                 batch_size = len(item.sample_index_list)
        #                 key = (batch_size, sample_length)
        #                 if key in self.shape_in_instance.keys():
        #                     if self.shape_in_instance[key] == instance_id:
        #                         return item
        #                     else:
        #                         target_instance = self.shape_in_instance[key]
        #                         if target_instance in self.finish_status.keys():
        #                             # target instance already finished execution - get item
        #                             del shape_in_instance[key]
        #                             return item
        #                         else:
        #                             self._instances_queue[target_instance].put(item)
        #                             # reapeat while loop - get new item and check if it's suitable for instance
        #                 else:
        #                     # mark shape with current instance
        #                     self.shape_in_instance[key] = instance_id
        #                     return item
        #             else:
        #                 self.finish_status[instance_id] = True
        #                 return item  # return None
        #     else:
        #         item = self._instances_queue[instance_id].get()
        #         return item

    def put(self, obj, block=True, timeout=None):
        # Delegate straight to the underlying JoinableQueue.
        return self._jq.put(obj, block, timeout)
        ##print("end put")

    def task_done(self):
        #print("task_done")
        return self._jq.task_done()
        #print("end task_done")

    def join(self):
        #print("join")
        return self._jq.join()
        #print("end join")
def main():
    """Run the MLPerf BERT benchmark harness.

    Parses command-line arguments, configures LoadGen from the
    mlperf/user config files, spawns ``Consumer`` worker processes fed
    either by per-bucket joinable queues (Server scenario) or by a single
    shared shape-based queue (other scenarios), starts the response
    thread, runs the LoadGen test, and then shuts everything down.
    """
    # Counters/flags shared with the worker processes and response thread.
    global num_ins
    global num_cpus
    global in_queue_cnt
    global out_queue_cnt
    global batching
    global queries_so_far
    global Latencies
    queries_so_far = 0
    args = get_args()
    log.info(args)
    scenario = args.scenario
    accuracy_mode = args.accuracy
    perf_count = args.perf_count
    batch_size = args.batch_size
    num_ins = args.num_instance
    num_cpus = args.num_phy_cpus
    batching = args.batching
    # Read Loadgen and workload config parameters
    settings = lg.TestSettings()
    settings.scenario = scenario_map[scenario]
    settings.FromConfig(args.mlperf_conf, "bert", scenario)
    settings.FromConfig(args.user_conf, "bert", scenario)
    settings.mode = lg.TestMode.AccuracyOnly if accuracy_mode else lg.TestMode.PerformanceOnly
    # Establish communication queues
    lock = multiprocessing.Lock()
    init_counter = multiprocessing.Value("i", 0)
    calibrate_counter = multiprocessing.Value("i", 0)
    out_queue = multiprocessing.Queue()
    # Create consumers
    consumers = []
    if scenario == "Server":
        # Server mode: one joinable queue per bucket, with per-bucket batch
        # size, instance count and CPU allotment read from machine_conf.json.
        from parse_server_config import configParser
        buckets = configParser( "machine_conf.json")
        cutoffs = list(buckets.keys())
        batch_sizes = {}
        in_queue = {j: multiprocessing.JoinableQueue() for j in buckets}
        proc_idx = 0
        num_cpus = 0
        total_ins = 0
        for cutoff in list(buckets.keys()):
            batch_sizes[ cutoff ] = buckets[ cutoff ]["batch_size"]
            num_ins = buckets[ cutoff ]["instances"]
            cpus_per_instance = buckets[ cutoff ]["cpus_per_instance"]
            num_cpus = num_ins * cpus_per_instance
            total_ins += num_ins
            for j in range(num_ins):
                # Assign each consumer a contiguous range of core indices.
                consumer = Consumer( in_queue[ cutoff ], out_queue, lock, init_counter, calibrate_counter, proc_idx, num_ins, args, cutoff)
                consumer.start_core_idx = proc_idx
                consumer.end_core_idx = proc_idx + cpus_per_instance - 1
                consumers.append(consumer)
                proc_idx = consumer.end_core_idx + 1
        num_ins = total_ins
    else:
        # Non-Server scenarios: one shared queue serves all instances.
        total_ins = num_ins
        in_queue = MultiprocessShapeBasedQueue()
        consumers = [Consumer(in_queue, out_queue, lock, init_counter, calibrate_counter, i, num_ins, args)
                    for i in range(num_ins)]
    for c in consumers:
        c.start()
    # Dataset object used by constructQSL
    data_set = BERTDataSet(args.vocab, args.perf_count)
    if scenario=="Server":
        issue_queue = InQueueServer(in_queue, batch_sizes, data_set, settings.min_query_count)
    else:
        issue_queue = InQueue(in_queue, batch_size, data_set)
    # Wait until all sub-processors are ready
    block_until(init_counter, total_ins, 2)
    # Start response thread
    response_worker = threading.Thread(
        target=response_loadgen, args=(out_queue,))
    response_worker.daemon = True
    response_worker.start()
    def issue_queries(query_samples):
        # It's called by loadgen to send query to SUT
        issue_queue.put(query_samples)
    sut = lg.ConstructSUT(
        issue_queries, flush_queries, process_latencies)
    qsl = lg.ConstructQSL(
        data_set.count, data_set.perf_count, load_query_samples, unload_query_samples)
    log_path = "build/logs"
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    log_output_settings = lg.LogOutputSettings()
    log_output_settings.outdir = log_path
    log_output_settings.copy_summary_to_stdout = True
    log_settings = lg.LogSettings()
    log_settings.log_output = log_output_settings
    lg.StartTestWithLogSettings(sut, qsl, settings, log_settings)
    # Wait until outQueue done
    while out_queue_cnt < in_queue_cnt:
        time.sleep(0.2)
    if scenario == "Server":
        for i in in_queue:
            in_queue[i].join()
        # NOTE(review): this loop reuses `i` left over from the loop above,
        # so shutdown sentinels only reach the last bucket's queue — verify
        # whether it should be nested inside the `for i in in_queue` loop.
        for j in range(buckets[ i ]["cpus_per_instance"]):
            in_queue[i].put(None)
    else:
        # One shutdown sentinel per consumer instance.
        for i in range(num_ins):
            in_queue.put(None)
    for c in consumers:
        c.join()
    out_queue.put(None)  # stop the response thread
    if accuracy_mode:
        cmd = "python accuracy-squad.py --log_file={}/mlperf_log_accuracy.json".format(log_path)
        subprocess.check_call(cmd, shell=True)
    lg.DestroyQSL(qsl)
    lg.DestroySUT(sut)
# Script entry point: run the benchmark only when executed directly.
if __name__ == '__main__':
    main()
|
[
"argparse.ArgumentParser",
"multiprocessing.Lock",
"mlperf_loadgen.TestSettings",
"multiprocessing.Value",
"collections.defaultdict",
"numpy.mean",
"multiprocessing.Queue",
"multiprocessing.Process.__init__",
"subprocess.check_call",
"os.path.exists",
"gluonnlp.data.SQuAD",
"mlperf_loadgen.LogOutputSettings",
"gluonnlp.data.BERTTokenizer",
"multiprocessing.JoinableQueue",
"mlperf_loadgen.QuerySampleResponse",
"threading.Thread",
"mlperf_loadgen.StartTestWithLogSettings",
"mlperf_loadgen.DestroySUT",
"os.system",
"time.sleep",
"parse_server_config.configParser",
"numpy.percentile",
"mlperf_loadgen.QuerySamplesComplete",
"mlperf_loadgen.ConstructSUT",
"mlperf_loadgen.LogSettings",
"mxnet.cpu",
"preprocessing_utils.preprocess_dataset",
"mxnet.nd.array",
"gluonnlp.utils.load_parameters",
"os.getpid",
"os.makedirs",
"logging.basicConfig",
"mlperf_loadgen.DestroyQSL",
"multiprocessing.Manager",
"gluonnlp.model.get_model",
"os.getcwd",
"mlperf_loadgen.ConstructQSL",
"utils.BertForQA",
"numpy.split",
"time.time",
"numpy.array",
"prof.prof_map.items",
"logging.getLogger"
] |
[((1116, 1155), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (1135, 1155), False, 'import logging\n'), ((1162, 1193), 'logging.getLogger', 'logging.getLogger', (['"""MXNet-BERT"""'], {}), "('MXNet-BERT')\n", (1179, 1193), False, 'import logging\n'), ((1333, 1358), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1356, 1358), False, 'import argparse\n'), ((3653, 3678), 'os.path.exists', 'os.path.exists', (['"""prof.py"""'], {}), "('prof.py')\n", (3667, 3678), False, 'import os\n'), ((33593, 33610), 'mlperf_loadgen.TestSettings', 'lg.TestSettings', ([], {}), '()\n', (33608, 33610), True, 'import mlperf_loadgen as lg\n'), ((33920, 33942), 'multiprocessing.Lock', 'multiprocessing.Lock', ([], {}), '()\n', (33940, 33942), False, 'import multiprocessing\n'), ((33962, 33991), 'multiprocessing.Value', 'multiprocessing.Value', (['"""i"""', '(0)'], {}), "('i', 0)\n", (33983, 33991), False, 'import multiprocessing\n'), ((34016, 34045), 'multiprocessing.Value', 'multiprocessing.Value', (['"""i"""', '(0)'], {}), "('i', 0)\n", (34037, 34045), False, 'import multiprocessing\n'), ((34062, 34085), 'multiprocessing.Queue', 'multiprocessing.Queue', ([], {}), '()\n', (34083, 34085), False, 'import multiprocessing\n'), ((35934, 35994), 'threading.Thread', 'threading.Thread', ([], {'target': 'response_loadgen', 'args': '(out_queue,)'}), '(target=response_loadgen, args=(out_queue,))\n', (35950, 35994), False, 'import threading\n'), ((36209, 36273), 'mlperf_loadgen.ConstructSUT', 'lg.ConstructSUT', (['issue_queries', 'flush_queries', 'process_latencies'], {}), '(issue_queries, flush_queries, process_latencies)\n', (36224, 36273), True, 'import mlperf_loadgen as lg\n'), ((36293, 36391), 'mlperf_loadgen.ConstructQSL', 'lg.ConstructQSL', (['data_set.count', 'data_set.perf_count', 'load_query_samples', 'unload_query_samples'], {}), '(data_set.count, data_set.perf_count, load_query_samples,\n 
unload_query_samples)\n', (36308, 36391), True, 'import mlperf_loadgen as lg\n'), ((36519, 36541), 'mlperf_loadgen.LogOutputSettings', 'lg.LogOutputSettings', ([], {}), '()\n', (36539, 36541), True, 'import mlperf_loadgen as lg\n'), ((36657, 36673), 'mlperf_loadgen.LogSettings', 'lg.LogSettings', ([], {}), '()\n', (36671, 36673), True, 'import mlperf_loadgen as lg\n'), ((36729, 36790), 'mlperf_loadgen.StartTestWithLogSettings', 'lg.StartTestWithLogSettings', (['sut', 'qsl', 'settings', 'log_settings'], {}), '(sut, qsl, settings, log_settings)\n', (36756, 36790), True, 'import mlperf_loadgen as lg\n'), ((37390, 37408), 'mlperf_loadgen.DestroyQSL', 'lg.DestroyQSL', (['qsl'], {}), '(qsl)\n', (37403, 37408), True, 'import mlperf_loadgen as lg\n'), ((37413, 37431), 'mlperf_loadgen.DestroySUT', 'lg.DestroySUT', (['sut'], {}), '(sut)\n', (37426, 37431), True, 'import mlperf_loadgen as lg\n'), ((3495, 3508), 'time.sleep', 'time.sleep', (['t'], {}), '(t)\n', (3505, 3508), False, 'import time\n'), ((3867, 3883), 'prof.prof_map.items', 'prof_map.items', ([], {}), '()\n', (3881, 3883), False, 'from prof import prof_map\n'), ((4045, 4061), 'prof.prof_map.items', 'prof_map.items', ([], {}), '()\n', (4059, 4061), False, 'from prof import prof_map\n'), ((5137, 5175), 'multiprocessing.Process.__init__', 'multiprocessing.Process.__init__', (['self'], {}), '(self)\n', (5169, 5175), False, 'import multiprocessing\n'), ((11582, 11596), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (11591, 11596), False, 'import os\n'), ((11638, 11646), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (11644, 11646), True, 'import mxnet as mx\n'), ((23323, 23340), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (23334, 23340), False, 'from collections import defaultdict\n'), ((23375, 23392), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (23386, 23392), False, 'from collections import defaultdict\n'), ((23416, 23432), 'collections.defaultdict', 'defaultdict', 
(['int'], {}), '(int)\n', (23427, 23432), False, 'from collections import defaultdict\n'), ((23468, 23485), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (23479, 23485), False, 'from collections import defaultdict\n'), ((23749, 23765), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (23760, 23765), False, 'from collections import defaultdict\n'), ((27350, 27386), 'numpy.split', 'np.split', (['result', 'batch_size'], {'axis': '(0)'}), '(result, batch_size, axis=0)\n', (27358, 27386), True, 'import numpy as np\n'), ((29727, 29774), 'gluonnlp.data.BERTTokenizer', 'nlp.data.BERTTokenizer', ([], {'vocab': 'vocab', 'lower': '(True)'}), '(vocab=vocab, lower=True)\n', (29749, 29774), True, 'import gluonnlp as nlp\n'), ((29918, 29960), 'gluonnlp.data.SQuAD', 'SQuAD', (['"""dev"""'], {'version': '"""1.1"""', 'root': 'dev_path'}), "('dev', version='1.1', root=dev_path)\n", (29923, 29960), False, 'from gluonnlp.data import SQuAD\n'), ((29990, 30147), 'preprocessing_utils.preprocess_dataset', 'preprocess_dataset', (['tokenizer', 'dev_data'], {'max_seq_length': 'max_seq_length', 'doc_stride': 'doc_stride', 'max_query_length': 'max_query_length', 'input_features': '(True)'}), '(tokenizer, dev_data, max_seq_length=max_seq_length,\n doc_stride=doc_stride, max_query_length=max_query_length,\n input_features=True)\n', (30008, 30147), False, 'from preprocessing_utils import preprocess_dataset, max_seq_length, max_query_length, doc_stride\n'), ((30664, 30695), 'multiprocessing.JoinableQueue', 'multiprocessing.JoinableQueue', ([], {}), '()\n', (30693, 30695), False, 'import multiprocessing\n'), ((30802, 30827), 'multiprocessing.Manager', 'multiprocessing.Manager', ([], {}), '()\n', (30825, 30827), False, 'import multiprocessing\n'), ((34230, 34263), 'parse_server_config.configParser', 'configParser', (['"""machine_conf.json"""'], {}), "('machine_conf.json')\n", (34242, 34263), False, 'from parse_server_config import configParser\n'), ((36437, 
36461), 'os.path.exists', 'os.path.exists', (['log_path'], {}), '(log_path)\n', (36451, 36461), False, 'import os\n'), ((36471, 36492), 'os.makedirs', 'os.makedirs', (['log_path'], {}), '(log_path)\n', (36482, 36492), False, 'import os\n'), ((36871, 36886), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (36881, 36886), False, 'import time\n'), ((37346, 37384), 'subprocess.check_call', 'subprocess.check_call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (37367, 37384), False, 'import subprocess\n'), ((11847, 11855), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (11853, 11855), True, 'import mxnet as mx\n'), ((26695, 26716), 'numpy.mean', 'np.mean', (['latencies_ns'], {}), '(latencies_ns)\n', (26702, 26716), True, 'import numpy as np\n'), ((26779, 26810), 'numpy.percentile', 'np.percentile', (['latencies_ns', '(50)'], {}), '(latencies_ns, 50)\n', (26792, 26810), True, 'import numpy as np\n'), ((26880, 26911), 'numpy.percentile', 'np.percentile', (['latencies_ns', '(90)'], {}), '(latencies_ns, 90)\n', (26893, 26911), True, 'import numpy as np\n'), ((27924, 27958), 'mlperf_loadgen.QuerySamplesComplete', 'lg.QuerySamplesComplete', (['responses'], {}), '(responses)\n', (27947, 27958), True, 'import mlperf_loadgen as lg\n'), ((28826, 28995), 'gluonnlp.model.get_model', 'nlp.model.get_model', ([], {'name': '"""bert_24_1024_16"""', 'dataset_name': 'None', 'vocab': 'vocab', 'pretrained': '(False)', 'ctx': 'ctx', 'use_pooler': '(False)', 'use_decoder': '(False)', 'use_classifier': '(False)'}), "(name='bert_24_1024_16', dataset_name=None, vocab=vocab,\n pretrained=False, ctx=ctx, use_pooler=False, use_decoder=False,\n use_classifier=False)\n", (28845, 28995), True, 'import gluonnlp as nlp\n'), ((29140, 29160), 'utils.BertForQA', 'BertForQA', ([], {'bert': 'bert'}), '(bert=bert)\n', (29149, 29160), False, 'from utils import BertForQA\n'), ((29173, 29242), 'gluonnlp.utils.load_parameters', 'nlp.utils.load_parameters', (['self.net', 'params'], {'ctx': 'ctx', 
'cast_dtype': '(True)'}), '(self.net, params, ctx=ctx, cast_dtype=True)\n', (29198, 29242), True, 'import gluonnlp as nlp\n'), ((29872, 29883), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (29881, 29883), False, 'import os\n'), ((30729, 30752), 'multiprocessing.Queue', 'multiprocessing.Queue', ([], {}), '()\n', (30750, 30752), False, 'import multiprocessing\n'), ((34353, 34384), 'multiprocessing.JoinableQueue', 'multiprocessing.JoinableQueue', ([], {}), '()\n', (34382, 34384), False, 'import multiprocessing\n'), ((9725, 9736), 'time.time', 'time.time', ([], {}), '()\n', (9734, 9736), False, 'import time\n'), ((9916, 9927), 'time.time', 'time.time', ([], {}), '()\n', (9925, 9927), False, 'import time\n'), ((27697, 27751), 'mlperf_loadgen.QuerySampleResponse', 'lg.QuerySampleResponse', (['query_id_list[i]', 'bi[0]', 'bi[1]'], {}), '(query_id_list[i], bi[0], bi[1])\n', (27719, 27751), True, 'import mlperf_loadgen as lg\n'), ((7471, 7495), 'mxnet.nd.array', 'mx.nd.array', (['inputs_list'], {}), '(inputs_list)\n', (7482, 7495), True, 'import mxnet as mx\n'), ((7548, 7577), 'mxnet.nd.array', 'mx.nd.array', (['token_types_list'], {}), '(token_types_list)\n', (7559, 7577), True, 'import mxnet as mx\n'), ((14950, 14974), 'mxnet.nd.array', 'mx.nd.array', (['inputs_list'], {}), '(inputs_list)\n', (14961, 14974), True, 'import mxnet as mx\n'), ((15020, 15049), 'mxnet.nd.array', 'mx.nd.array', (['token_types_list'], {}), '(token_types_list)\n', (15031, 15049), True, 'import mxnet as mx\n'), ((9261, 9285), 'mxnet.nd.array', 'mx.nd.array', (['inputs_list'], {}), '(inputs_list)\n', (9272, 9285), True, 'import mxnet as mx\n'), ((9342, 9371), 'mxnet.nd.array', 'mx.nd.array', (['token_types_list'], {}), '(token_types_list)\n', (9353, 9371), True, 'import mxnet as mx\n'), ((21210, 21221), 'os.getpid', 'os.getpid', ([], {}), '()\n', (21219, 21221), False, 'import os\n'), ((7631, 7661), 'mxnet.nd.array', 'mx.nd.array', (['valid_length_list'], {}), '(valid_length_list)\n', (7642, 7661), 
True, 'import mxnet as mx\n'), ((15096, 15126), 'mxnet.nd.array', 'mx.nd.array', (['valid_length_list'], {}), '(valid_length_list)\n', (15107, 15126), True, 'import mxnet as mx\n'), ((22264, 22275), 'os.getpid', 'os.getpid', ([], {}), '()\n', (22273, 22275), False, 'import os\n'), ((22673, 22684), 'os.getpid', 'os.getpid', ([], {}), '()\n', (22682, 22684), False, 'import os\n'), ((9429, 9459), 'mxnet.nd.array', 'mx.nd.array', (['valid_length_list'], {}), '(valid_length_list)\n', (9440, 9459), True, 'import mxnet as mx\n'), ((27498, 27509), 'numpy.array', 'np.array', (['o'], {}), '(o)\n', (27506, 27509), True, 'import numpy as np\n')]
|
# Author: <NAME> (<EMAIL>)
# Center for Machine Perception, Czech Technical University in Prague
"""Evaluation script for the BOP Challenge 2019."""
import os
import time
import argparse
import subprocess
import numpy as np
from bop_toolkit_lib import config
from bop_toolkit_lib import inout
from bop_toolkit_lib import misc
# PARAMETERS (some can be overwritten by the command line arguments below).
################################################################################
p = {
  # Errors to calculate.
  'errors': [
    {
      'n_top': -1,
      'type': 'vsd',
      # Per-dataset VSD delta parameter (see the VSD pose error definition).
      'vsd_deltas': {
        'hb': 15,
        'icbin': 15,
        'icmi': 15,
        'itodd': 5,
        'lm': 15,
        'lmo': 15,
        'ruapc': 15,
        'tless': 15,
        'tudl': 15,
        'tyol': 15,
      },
      'vsd_taus': list(np.arange(0.05, 0.51, 0.05)),
      'correct_th': [[th] for th in np.arange(0.05, 0.51, 0.05)]
    },
    {
      'n_top': -1,
      'type': 'mssd',
      'correct_th': [[th] for th in np.arange(0.05, 0.51, 0.05)]
    },
    {
      'n_top': -1,
      'type': 'mspd',
      'correct_th': [[th] for th in np.arange(5, 51, 5)]
    },
  ],
  # Minimum visible surface fraction of a valid GT pose.
  'visib_gt_min': 0.1,
  # See misc.get_symmetry_transformations().
  'max_sym_disc_step': 0.01,
  # Type of the renderer (used for the VSD pose error function).
  'renderer_type': 'python', # Options: 'cpp', 'python'.
  # Names of files with results for which to calculate the errors (assumed to be
  # stored in folder config.eval_path). See docs/bop_challenge_2019.md for a
  # description of the format. Example results can be found at:
  # http://ptak.felk.cvut.cz/6DB/public/bop_sample_results/bop_challenge_2019/
  'result_filenames': [
    '/home_local/sund_ma/src/foreign_packages/bop/bop_results/bop_challenge_2019/hodan-iros15_lm-test.csv',
  ],
  # File with a list of estimation targets to consider. The file is assumed to
  # be stored in the dataset folder.
  'targets_filename': 'test_targets_bop19.json',
}
################################################################################
# Command line arguments.
# ------------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--visib_gt_min', default=p['visib_gt_min'])
parser.add_argument('--max_sym_disc_step', default=p['max_sym_disc_step'])
parser.add_argument('--renderer_type', default=p['renderer_type'])
parser.add_argument('--result_filenames',
                    default=','.join(p['result_filenames']),
                    help='Comma-separated names of files with results.')
parser.add_argument('--targets_filename', default=p['targets_filename'])
args = parser.parse_args()
# Override the defaults above with any values provided on the command line.
p['visib_gt_min'] = float(args.visib_gt_min)
p['max_sym_disc_step'] = float(args.max_sym_disc_step)
p['renderer_type'] = str(args.renderer_type)
p['result_filenames'] = args.result_filenames.split(',')
p['targets_filename'] = str(args.targets_filename)
# Evaluation.
# ------------------------------------------------------------------------------
# For each result file: compute every configured pose error (vsd/mssd/mspd) via
# the eval_calc_errors.py subprocess, then compute recall scores for every
# correctness threshold via eval_calc_scores.py, and finally report the area
# under the recall surface (AUR) per error type plus the average BOP score.
for result_filename in p['result_filenames']:
  misc.log('===========')
  misc.log('EVALUATING: {}'.format(result_filename))
  misc.log('===========')
  time_start = time.time()
  aur = {}
  for error in p['errors']:
    # Calculate error of the pose estimates.
    calc_errors_cmd = [
      'python',
      os.path.join('scripts', 'eval_calc_errors.py'),
      '--n_top={}'.format(error['n_top']),
      '--error_type={}'.format(error['type']),
      '--result_filenames={}'.format(result_filename),
      '--renderer_type={}'.format(p['renderer_type']),
      '--targets_filename={}'.format(p['targets_filename']),
      '--max_sym_disc_step={}'.format(p['max_sym_disc_step']),
      '--skip_missing=1',
    ]
    if error['type'] == 'vsd':
      # VSD needs the per-dataset deltas and the list of taus in addition.
      vsd_deltas_str = \
        ','.join(['{}:{}'.format(k, v) for k, v in error['vsd_deltas'].items()])
      calc_errors_cmd += [
        '--vsd_deltas={}'.format(vsd_deltas_str),
        '--vsd_taus={}'.format(','.join(map(str, error['vsd_taus'])))
      ]
    misc.log('Running: ' + ' '.join(calc_errors_cmd))
    if subprocess.call(calc_errors_cmd) != 0:
      # BUGFIX: the message used to claim "VSD failed" even for mssd/mspd.
      raise RuntimeError(
        'Calculation of {} errors failed.'.format(error['type']))
    # Name of the result and the dataset.
    result_name = os.path.splitext(os.path.basename(result_filename))[0]
    dataset = str(result_name.split('_')[1].split('-')[0])
    # Paths (rel. to config.eval_path) to folders with calculated pose errors.
    # For VSD, there is one path for each setting of tau. For the other pose
    # error functions, there is only one path.
    error_dir_paths = {}
    if error['type'] == 'vsd':
      for vsd_tau in error['vsd_taus']:
        error_sign = misc.get_error_signature(
          error['type'], error['n_top'], vsd_delta=error['vsd_deltas'][dataset],
          vsd_tau=vsd_tau)
        error_dir_paths[error_sign] = os.path.join(result_name, error_sign)
    else:
      error_sign = misc.get_error_signature(error['type'], error['n_top'])
      error_dir_paths[error_sign] = os.path.join(result_name, error_sign)
    # Recall scores for all settings of the threshold of correctness (and also
    # of the misalignment tolerance tau in the case of VSD).
    recalls = []
    # Calculate performance scores.
    for error_sign, error_dir_path in error_dir_paths.items():
      for correct_th in error['correct_th']:
        calc_scores_cmd = [
          'python',
          os.path.join('scripts', 'eval_calc_scores.py'),
          '--error_dir_paths={}'.format(error_dir_path),
          '--targets_filename={}'.format(p['targets_filename']),
          '--visib_gt_min={}'.format(p['visib_gt_min'])
        ]
        calc_scores_cmd += ['--correct_th_{}={}'.format(
          error['type'], ','.join(map(str, correct_th)))]
        misc.log('Running: ' + ' '.join(calc_scores_cmd))
        if subprocess.call(calc_scores_cmd) != 0:
          raise RuntimeError('Calculation of scores failed.')
        # Path to file with calculated scores.
        score_sign = misc.get_score_signature(correct_th, p['visib_gt_min'])
        scores_filename = 'scores_{}.json'.format(score_sign)
        scores_path = os.path.join(
          config.eval_path, result_name, error_sign, scores_filename)
        # Load the scores.
        misc.log('Loading calculated scores from: {}'.format(scores_path))
        scores = inout.load_json(scores_path)
        recalls.append(scores['total_recall'])
    # Area under precision recall:
    aur[error['type']] = np.mean(recalls)
    misc.log('Recall scores: {}'.format(' '.join(map(str, recalls))))
  time_total = time.time() - time_start
  misc.log('Evaluation of {} took {}s.'.format(result_filename, time_total))
  # output final scores
  err_types = [e['type'] for e in p['errors']]
  for err_type in err_types:
    misc.log('#### {} #### area under recall surface: {}'.format(err_type,
                                                                aur[err_type]))
  # The average BOP score is only defined when all three error types were run.
  if {'vsd', 'mssd', 'mspd'}.issubset(err_types):
    test_set = os.path.basename(result_filename)
    mean_error = np.mean([aur[err_type] for err_type in err_types])
    misc.log('Average BOP score on {}: {}'.format(test_set, mean_error))
misc.log('Done.')
|
[
"argparse.ArgumentParser",
"os.path.basename",
"bop_toolkit_lib.misc.log",
"time.time",
"bop_toolkit_lib.misc.get_score_signature",
"numpy.mean",
"subprocess.call",
"numpy.arange",
"bop_toolkit_lib.inout.load_json",
"os.path.join",
"bop_toolkit_lib.misc.get_error_signature"
] |
[((2255, 2280), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2278, 2280), False, 'import argparse\n'), ((7234, 7251), 'bop_toolkit_lib.misc.log', 'misc.log', (['"""Done."""'], {}), "('Done.')\n", (7242, 7251), False, 'from bop_toolkit_lib import misc\n'), ((3163, 3186), 'bop_toolkit_lib.misc.log', 'misc.log', (['"""==========="""'], {}), "('===========')\n", (3171, 3186), False, 'from bop_toolkit_lib import misc\n'), ((3242, 3265), 'bop_toolkit_lib.misc.log', 'misc.log', (['"""==========="""'], {}), "('===========')\n", (3250, 3265), False, 'from bop_toolkit_lib import misc\n'), ((3282, 3293), 'time.time', 'time.time', ([], {}), '()\n', (3291, 3293), False, 'import time\n'), ((6579, 6595), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (6586, 6595), True, 'import numpy as np\n'), ((6682, 6693), 'time.time', 'time.time', ([], {}), '()\n', (6691, 6693), False, 'import time\n'), ((7058, 7091), 'os.path.basename', 'os.path.basename', (['result_filename'], {}), '(result_filename)\n', (7074, 7091), False, 'import os\n'), ((7109, 7159), 'numpy.mean', 'np.mean', (['[aur[err_type] for err_type in err_types]'], {}), '([aur[err_type] for err_type in err_types])\n', (7116, 7159), True, 'import numpy as np\n'), ((3426, 3472), 'os.path.join', 'os.path.join', (['"""scripts"""', '"""eval_calc_errors.py"""'], {}), "('scripts', 'eval_calc_errors.py')\n", (3438, 3472), False, 'import os\n'), ((4184, 4216), 'subprocess.call', 'subprocess.call', (['calc_errors_cmd'], {}), '(calc_errors_cmd)\n', (4199, 4216), False, 'import subprocess\n'), ((5013, 5068), 'bop_toolkit_lib.misc.get_error_signature', 'misc.get_error_signature', (["error['type']", "error['n_top']"], {}), "(error['type'], error['n_top'])\n", (5037, 5068), False, 'from bop_toolkit_lib import misc\n'), ((5105, 5142), 'os.path.join', 'os.path.join', (['result_name', 'error_sign'], {}), '(result_name, error_sign)\n', (5117, 5142), False, 'import os\n'), ((831, 858), 'numpy.arange', 
'np.arange', (['(0.05)', '(0.51)', '(0.05)'], {}), '(0.05, 0.51, 0.05)\n', (840, 858), True, 'import numpy as np\n'), ((4356, 4389), 'os.path.basename', 'os.path.basename', (['result_filename'], {}), '(result_filename)\n', (4372, 4389), False, 'import os\n'), ((4774, 4891), 'bop_toolkit_lib.misc.get_error_signature', 'misc.get_error_signature', (["error['type']", "error['n_top']"], {'vsd_delta': "error['vsd_deltas'][dataset]", 'vsd_tau': 'vsd_tau'}), "(error['type'], error['n_top'], vsd_delta=error[\n 'vsd_deltas'][dataset], vsd_tau=vsd_tau)\n", (4798, 4891), False, 'from bop_toolkit_lib import misc\n'), ((4946, 4983), 'os.path.join', 'os.path.join', (['result_name', 'error_sign'], {}), '(result_name, error_sign)\n', (4958, 4983), False, 'import os\n'), ((6097, 6152), 'bop_toolkit_lib.misc.get_score_signature', 'misc.get_score_signature', (['correct_th', "p['visib_gt_min']"], {}), "(correct_th, p['visib_gt_min'])\n", (6121, 6152), False, 'from bop_toolkit_lib import misc\n'), ((6238, 6310), 'os.path.join', 'os.path.join', (['config.eval_path', 'result_name', 'error_sign', 'scores_filename'], {}), '(config.eval_path, result_name, error_sign, scores_filename)\n', (6250, 6310), False, 'import os\n'), ((6442, 6470), 'bop_toolkit_lib.inout.load_json', 'inout.load_json', (['scores_path'], {}), '(scores_path)\n', (6457, 6470), False, 'from bop_toolkit_lib import inout\n'), ((897, 924), 'numpy.arange', 'np.arange', (['(0.05)', '(0.51)', '(0.05)'], {}), '(0.05, 0.51, 0.05)\n', (906, 924), True, 'import numpy as np\n'), ((1016, 1043), 'numpy.arange', 'np.arange', (['(0.05)', '(0.51)', '(0.05)'], {}), '(0.05, 0.51, 0.05)\n', (1025, 1043), True, 'import numpy as np\n'), ((1135, 1154), 'numpy.arange', 'np.arange', (['(5)', '(51)', '(5)'], {}), '(5, 51, 5)\n', (1144, 1154), True, 'import numpy as np\n'), ((5505, 5551), 'os.path.join', 'os.path.join', (['"""scripts"""', '"""eval_calc_scores.py"""'], {}), "('scripts', 'eval_calc_scores.py')\n", (5517, 5551), False, 'import os\n'), 
((5927, 5959), 'subprocess.call', 'subprocess.call', (['calc_scores_cmd'], {}), '(calc_scores_cmd)\n', (5942, 5959), False, 'import subprocess\n')]
|
# ---
# jupyter:
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# %% [markdown]
# # The bike rides dataset
#
# In this notebook, we will present the "Bike Ride" dataset. This dataset is
# located in the directory `datasets` in a comma separated values (CSV) format.
#
# We open this dataset using pandas.
# %%
import pandas as pd
cycling = pd.read_csv("../datasets/bike_rides.csv")
cycling.head()
# %% [markdown]
# The first column `timestamp` contains a specific information regarding the
# the time and date of a record while other columns contain numerical value
# of some specific measurements. Let's check the data type of the columns more
# in details.
# %%
cycling.info()
# %% [markdown]
# Indeed, CSV format store data as text. Pandas tries to infer numerical type
# by default. It is the reason why all features but `timestamp` are encoded as
# floating point values. However, we see that the `timestamp` is stored as an
# `object` column. It means that the data in this column are stored as `str`
# rather than a specialized `datetime` data type.
#
# In fact, one needs to set an option such that pandas is directed to infer
# such data type when opening the file. In addition, we will want to use
# `timestamp` as an index. Thus, we can reopen the file with some extra
# arguments to help pandas at reading properly our CSV file.
# %%
cycling = pd.read_csv("../datasets/bike_rides.csv", index_col=0,
parse_dates=True)
cycling.index.name = ""
cycling.head()
# %%
cycling.info()
# %% [markdown]
# By specifying to pandas to parse the date, we obtain a `DatetimeIndex` that
# is really handy when filtering data based on date.
#
# We can now have a look at the data stored in our dataframe. It will help us
# to frame the data science problem that we try to solve.
#
# The records correspond at information derived from GPS recordings of a
# cyclist (`speed`, `acceleration`, `slope`) and some extra information
# acquired from other sensors: `heart-rate` that corresponds to the number of
# beats per minute of the cyclist heart, `cadence` that is the rate at which a
# cyclist is turning the pedals, and `power` that corresponds to the work
# required by the cyclist to go forward.
#
# The power might be slightly an abstract quantity so let's give a more
# intuitive explanation.
#
# Let's take the example of a soup blender that one uses to blend vegetable.
# The engine of this blender develop an instantaneous power of ~300 Watts to
# blend the vegetable. Here, our cyclist is just the engine of the blender (at
# the difference that an average cyclist will develop an instantaneous power
# around ~150 Watts) and blending the vegetable corresponds to move the
# cyclist's bike forward.
#
# Professional cyclists are using power to calibrate their training and track
# the energy spent during a ride. For instance, riding at a higher power
# requires more energy and thus, you need to provide resources to create this
# energy. With human, this resource is food. For our soup blender, this
# resource can be uranium, petrol, natural gas, coal, etc. Our body serves as a
# power plant to transform the resources into energy.
#
# The issue with measuring power is linked to the cost of the sensor: a cycling
# power meter. The cost of such sensor vary from $400 to $1000. Thus, our
# data science problem is quite easy: can we predict instantaneous cyclist
# power from other (cheaper) sensors.
# %%
target_name = "power"
data, target = cycling.drop(columns=target_name), cycling[target_name]
# %% [markdown]
# We can have a first look at the target distribution.
# %%
import matplotlib.pyplot as plt
target.plot.hist(bins=50, edgecolor="black")
plt.xlabel("Power (W)")
# %% [markdown]
# We see a pick at 0 Watts, it corresponds to whenever our cyclist does not
# pedals (descent, stopped). In average, this cyclist delivers a power around
# ~200 Watts. We also see a long tail from ~300 Watts to ~400 Watts. You can
# think that this range of data correspond to effort a cyclist will train to
# reproduce to be able to breakout in the final kilometers of a cycling race.
# However, this is costly for the human body and no one can cruise with this
# power output.
#
# Now, let's have a look at the data.
# %%
data.head()
# %% [markdown]
# We can first have a closer look to the index of the dataframe.
# %%
data.index
# %% [markdown]
# We see that records are acquired every seconds.
# %%
data.index.min(), data.index.max()
# %% [markdown]
# The starting date is the August 18, 2020 and the ending date is
# September 13, 2020. However, it is obvious that our cyclist did not ride
# every seconds between these dates. Indeed, only a couple of date should be
# present in the dataframe, corresponding to the number of cycling rides.
# %%
data.index.normalize().nunique()
# %% [markdown]
# Indeed, we have only four different dates corresponding to four rides. Let's
# extract only the first ride of August 18, 2020.
# %%
date_first_ride = "2020-08-18"
cycling_ride = cycling.loc[date_first_ride]
data_ride, target_ride = data.loc[date_first_ride], target.loc[date_first_ride]
# %%
data_ride.plot()
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
_ = plt.title("Sensor values for different cyclist measurements")
# %% [markdown]
# Since the unit and range of each measurement (feature) is different, it is
# rather difficult to interpret the plot. Also, the high temporal resolution
# make it difficult to make any observation. We could resample the data to get
# a smoother visualization.
# %%
data_ride.resample("60S").mean().plot()
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
_ = plt.title("Sensor values for different cyclist measurements")
# %% [markdown]
# We can check the range of the different features:
# %%
axs = data_ride.hist(figsize=(10, 12), bins=50, edgecolor="black", grid=False)
# add the units to the plots
units = ["beats per minute", "rotations per minute", "meters per second",
"meters per second squared", "%"]
for unit, ax in zip(units, axs.ravel()):
ax.set_xlabel(unit)
plt.subplots_adjust(hspace=0.6)
# %% [markdown]
# From these plots, we can see some interesting information: a cyclist is
# spending some time without pedaling. This samples should be associated with
# a null power. We also see that the slope have large extremum.
#
# Let's make a pair plot on a subset of data samples to see if we can confirm
# some of these intuitions.
# %%
import numpy as np
rng = np.random.RandomState(0)
indices = rng.choice(np.arange(cycling_ride.shape[0]), size=500, replace=False)
# %%
subset = cycling_ride.iloc[indices].copy()
# Quantize the target and keep the midpoint for each interval
subset["power"] = pd.qcut(subset["power"], 6, retbins=False)
subset["power"] = subset["power"].apply(lambda x: x.mid)
# %%
import seaborn as sns
_ = sns.pairplot(data=subset, hue="power", palette="viridis")
# %% [markdown]
# Indeed, we see that low cadence is associated with low power. We can also
# see a link between higher slope / high heart-rate and higher power: a cyclist
# needs to develop more energy to go uphill, enforcing a stronger physiological
# stimuli on the body. We can confirm this intuition by looking at the
# interaction between the slope and the speed: a lower speed with a higher
# slope is usually associated with higher power.
|
[
"matplotlib.pyplot.title",
"pandas.read_csv",
"matplotlib.pyplot.legend",
"numpy.random.RandomState",
"numpy.arange",
"seaborn.pairplot",
"matplotlib.pyplot.subplots_adjust",
"pandas.qcut",
"matplotlib.pyplot.xlabel"
] |
[((367, 408), 'pandas.read_csv', 'pd.read_csv', (['"""../datasets/bike_rides.csv"""'], {}), "('../datasets/bike_rides.csv')\n", (378, 408), True, 'import pandas as pd\n'), ((1387, 1459), 'pandas.read_csv', 'pd.read_csv', (['"""../datasets/bike_rides.csv"""'], {'index_col': '(0)', 'parse_dates': '(True)'}), "('../datasets/bike_rides.csv', index_col=0, parse_dates=True)\n", (1398, 1459), True, 'import pandas as pd\n'), ((3717, 3740), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Power (W)"""'], {}), "('Power (W)')\n", (3727, 3740), True, 'import matplotlib.pyplot as plt\n'), ((5180, 5234), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.05, 1)', 'loc': '"""upper left"""'}), "(bbox_to_anchor=(1.05, 1), loc='upper left')\n", (5190, 5234), True, 'import matplotlib.pyplot as plt\n'), ((5239, 5300), 'matplotlib.pyplot.title', 'plt.title', (['"""Sensor values for different cyclist measurements"""'], {}), "('Sensor values for different cyclist measurements')\n", (5248, 5300), True, 'import matplotlib.pyplot as plt\n'), ((5625, 5679), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.05, 1)', 'loc': '"""upper left"""'}), "(bbox_to_anchor=(1.05, 1), loc='upper left')\n", (5635, 5679), True, 'import matplotlib.pyplot as plt\n'), ((5684, 5745), 'matplotlib.pyplot.title', 'plt.title', (['"""Sensor values for different cyclist measurements"""'], {}), "('Sensor values for different cyclist measurements')\n", (5693, 5745), True, 'import matplotlib.pyplot as plt\n'), ((6111, 6142), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.6)'}), '(hspace=0.6)\n', (6130, 6142), True, 'import matplotlib.pyplot as plt\n'), ((6516, 6540), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (6537, 6540), True, 'import numpy as np\n'), ((6750, 6792), 'pandas.qcut', 'pd.qcut', (["subset['power']", '(6)'], {'retbins': '(False)'}), "(subset['power'], 6, retbins=False)\n", (6757, 6792), True, 'import 
pandas as pd\n'), ((6883, 6940), 'seaborn.pairplot', 'sns.pairplot', ([], {'data': 'subset', 'hue': '"""power"""', 'palette': '"""viridis"""'}), "(data=subset, hue='power', palette='viridis')\n", (6895, 6940), True, 'import seaborn as sns\n'), ((6562, 6594), 'numpy.arange', 'np.arange', (['cycling_ride.shape[0]'], {}), '(cycling_ride.shape[0])\n', (6571, 6594), True, 'import numpy as np\n')]
|
import logging
import random
import numpy as np
# imports for deformed slice
from skimage.draw import line
from scipy.ndimage.measurements import label
from scipy.ndimage.interpolation import map_coordinates
from scipy.ndimage.morphology import binary_dilation
from gunpowder.batch_request import BatchRequest
from gunpowder.coordinate import Coordinate
from .batch_filter import BatchFilter
logger = logging.getLogger(__name__)
class DefectAugment(BatchFilter):
    '''Augment intensity arrays section-wise with artifacts like missing
    sections, low-contrast sections, by blending in artifacts drawn from a
    separate source, or by deforming a section.
    Args:
        intensities (:class:`ArrayKey`):
            The key of the array of intensities to modify.
        prob_missing(``float``):
        prob_low_contrast(``float``):
        prob_artifact(``float``):
        prob_deform(``float``):
            Probabilities of having a missing section, low-contrast section, an
            artifact (see param ``artifact_source``) or a deformed slice. The
            sum should not exceed 1. Values in missing sections will be set to
            0.
        contrast_scale (``float``, optional):
            By how much to scale the intensities for a low-contrast section,
            used if ``prob_low_contrast`` > 0.
        artifact_source (class:`BatchProvider`, optional):
            A gunpowder batch provider that delivers intensities (via
            :class:`ArrayKey` ``artifacts``) and an alpha mask (via
            :class:`ArrayKey` ``artifacts_mask``), used if ``prob_artifact`` > 0.
        artifacts(:class:`ArrayKey`, optional):
            The key to query ``artifact_source`` for to get the intensities
            of the artifacts.
        artifacts_mask(:class:`ArrayKey`, optional):
            The key to query ``artifact_source`` for to get the alpha mask
            of the artifacts to blend them with ``intensities``.
        deformation_strength (``int``, optional):
            Strength of the slice deformation in voxels, used if
            ``prob_deform`` > 0. The deformation models a fold by shifting the
            section contents towards a randomly oriented line in the section.
            The line itself will be drawn with a value of 0.
        axis (``int``, optional):
            Along which axis sections are cut.
    '''
    def __init__(
            self,
            intensities,
            prob_missing=0.05,
            prob_low_contrast=0.05,
            prob_artifact=0.0,
            prob_deform=0.0,
            contrast_scale=0.1,
            artifact_source=None,
            artifacts=None,
            artifacts_mask=None,
            deformation_strength=20,
            axis=0):
        self.intensities = intensities
        self.prob_missing = prob_missing
        self.prob_low_contrast = prob_low_contrast
        self.prob_artifact = prob_artifact
        self.prob_deform = prob_deform
        self.contrast_scale = contrast_scale
        self.artifact_source = artifact_source
        self.artifacts = artifacts
        self.artifacts_mask = artifacts_mask
        self.deformation_strength = deformation_strength
        self.axis = axis
    def setup(self):
        """Set up the optional artifact source alongside this node."""
        if self.artifact_source is not None:
            self.artifact_source.setup()
    def teardown(self):
        """Tear down the optional artifact source alongside this node."""
        if self.artifact_source is not None:
            self.artifact_source.teardown()
    # send roi request to data-source upstream
    def prepare(self, request):
        """Decide per-section augmentations and declare upstream dependencies.

        Seeds the RNG from the request so augmentations are reproducible,
        assigns an augmentation type to each section, pre-computes the
        deformation fields for 'deformed_slice' sections, and (if needed)
        grows the upstream ROI to cover the deformation margin.
        """
        random.seed(request.random_seed)
        deps = BatchRequest()
        # we prepare the augmentations, by determining which slices
        # will be augmented by which method
        # If one of the slices is augmented with 'deform',
        # we prepare these trafos already
        # and request a bigger roi from upstream
        prob_missing_threshold = self.prob_missing
        prob_low_contrast_threshold = prob_missing_threshold + self.prob_low_contrast
        prob_artifact_threshold = prob_low_contrast_threshold + self.prob_artifact
        prob_deform_slice = prob_artifact_threshold + self.prob_deform
        spec = request[self.intensities].copy()
        roi = spec.roi
        logger.debug("downstream request ROI is %s" % roi)
        raw_voxel_size = self.spec[self.intensities].voxel_size
        # store the mapping slice to augmentation type in a dict
        self.slice_to_augmentation = {}
        # store the transformations for deform slice
        self.deform_slice_transformations = {}
        for c in range((roi / raw_voxel_size).get_shape()[self.axis]):
            r = random.random()
            if r < prob_missing_threshold:
                logger.debug("Zero-out " + str(c))
                self.slice_to_augmentation[c] = 'zero_out'
            elif r < prob_low_contrast_threshold:
                logger.debug("Lower contrast " + str(c))
                self.slice_to_augmentation[c] = 'lower_contrast'
            elif r < prob_artifact_threshold:
                logger.debug("Add artifact " + str(c))
                self.slice_to_augmentation[c] = 'artifact'
            elif r < prob_deform_slice:
                logger.debug("Add deformed slice " + str(c))
                self.slice_to_augmentation[c] = 'deformed_slice'
                # get the shape of a single slice
                slice_shape = (roi / raw_voxel_size).get_shape()
                slice_shape = slice_shape[:self.axis] + slice_shape[self.axis+1:]
                self.deform_slice_transformations[c] = self.__prepare_deform_slice(slice_shape)
        # prepare transformation and
        # request bigger upstream roi for deformed slice
        if 'deformed_slice' in self.slice_to_augmentation.values():
            # create roi sufficiently large to feed deformation
            logger.debug("before growth: %s" % spec.roi)
            growth = Coordinate(
                tuple(0 if d == self.axis else raw_voxel_size[d] * self.deformation_strength
                      for d in range(spec.roi.dims()))
            )
            logger.debug("growing request by %s" % str(growth))
            source_roi = roi.grow(growth, growth)
            # update request ROI to get all voxels necessary to perform
            # transformation
            spec.roi = source_roi
            logger.debug("upstream request roi is %s" % spec.roi)
        deps[self.intensities] = spec
        # BUGFIX: the dependency request was built but never returned, so the
        # (possibly grown) ROI never reached upstream providers.
        return deps
    def process(self, batch, request):
        """Apply the per-section augmentations chosen in prepare()."""
        assert batch.get_total_roi().dims() == 3, "defectaugment works on 3d batches only"
        raw = batch.arrays[self.intensities]
        raw_voxel_size = self.spec[self.intensities].voxel_size
        for c, augmentation_type in self.slice_to_augmentation.items():
            section_selector = tuple(
                slice(None if d != self.axis else c, None if d != self.axis else c+1)
                for d in range(raw.spec.roi.dims())
            )
            if augmentation_type == 'zero_out':
                raw.data[section_selector] = 0
            # BUGFIX: prepare() tags these sections 'lower_contrast'; the
            # original compared against 'low_contrast' here, so the
            # low-contrast augmentation was silently never applied.
            elif augmentation_type == 'lower_contrast':
                section = raw.data[section_selector]
                mean = section.mean()
                section -= mean
                section *= self.contrast_scale
                section += mean
                raw.data[section_selector] = section
            elif augmentation_type == 'artifact':
                section = raw.data[section_selector]
                alpha_voxel_size = self.artifact_source.spec[self.artifacts_mask].voxel_size
                assert raw_voxel_size == alpha_voxel_size, ("Can only alpha blend RAW with "
                                                            "ALPHA_MASK if both have the same "
                                                            "voxel size")
                artifact_request = BatchRequest()
                artifact_request.add(self.artifacts, Coordinate(section.shape) * raw_voxel_size, voxel_size=raw_voxel_size)
                artifact_request.add(self.artifacts_mask, Coordinate(section.shape) * alpha_voxel_size, voxel_size=raw_voxel_size)
                logger.debug("Requesting artifact batch %s", artifact_request)
                artifact_batch = self.artifact_source.request_batch(artifact_request)
                artifact_alpha = artifact_batch.arrays[self.artifacts_mask].data
                artifact_raw = artifact_batch.arrays[self.artifacts].data
                assert artifact_alpha.dtype == np.float32
                assert artifact_alpha.min() >= 0.0
                assert artifact_alpha.max() <= 1.0
                # alpha-blend the artifact into the section
                raw.data[section_selector] = section*(1.0 - artifact_alpha) + artifact_raw*artifact_alpha
            elif augmentation_type == 'deformed_slice':
                section = raw.data[section_selector].squeeze()
                # set interpolation to cubic, spec interploatable is true, else to 0
                interpolation = 3 if self.spec[self.intensities].interpolatable else 0
                # load the deformation fields that were prepared for this slice
                flow_x, flow_y, line_mask = self.deform_slice_transformations[c]
                # apply the deformation fields
                shape = section.shape
                section = map_coordinates(
                    section, (flow_y, flow_x), mode='constant', order=interpolation
                ).reshape(shape)
                # things can get smaller than 0 at the boundary, so we clip
                section = np.clip(section, 0., 1.)
                # zero-out data below the line mask
                section[line_mask] = 0.
                raw.data[section_selector] = section
        # in case we needed to change the ROI due to a deformation augment,
        # restore original ROI and crop the array data
        if 'deformed_slice' in self.slice_to_augmentation.values():
            old_roi = request[self.intensities].roi
            logger.debug("resetting roi to %s" % old_roi)
            crop = tuple(
                slice(None) if d == self.axis else slice(self.deformation_strength, -self.deformation_strength)
                for d in range(raw.spec.roi.dims())
            )
            raw.data = raw.data[crop]
            raw.spec.roi = old_roi
    def __prepare_deform_slice(self, slice_shape):
        """Build the deformation fields (flow_x, flow_y) and the fold-line mask
        for one section of shape ``slice_shape``."""
        # grow slice shape by 2 x deformation strength
        grow_by = 2 * self.deformation_strength
        shape = (slice_shape[0] + grow_by, slice_shape[1] + grow_by)
        # randomly choose fixed x or fixed y with p = 1/2
        fixed_x = random.random() < .5
        if fixed_x:
            x0, y0 = 0, np.random.randint(1, shape[1] - 2)
            x1, y1 = shape[0] - 1, np.random.randint(1, shape[1] - 2)
        else:
            x0, y0 = np.random.randint(1, shape[0] - 2), 0
            x1, y1 = np.random.randint(1, shape[0] - 2), shape[1] - 1
        ## generate the mask of the line that should be blacked out
        line_mask = np.zeros(shape, dtype='bool')
        rr, cc = line(x0, y0, x1, y1)
        line_mask[rr, cc] = 1
        # generate vectorfield pointing towards the line to compress the image
        # first we get the unit vector representing the line
        line_vector = np.array([x1 - x0, y1 - y0], dtype='float32')
        line_vector /= np.linalg.norm(line_vector)
        # next, we generate the normal to the line
        normal_vector = np.zeros_like(line_vector)
        normal_vector[0] = - line_vector[1]
        normal_vector[1] = line_vector[0]
        # make meshgrid
        x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
        # generate the vector field
        flow_x, flow_y = np.zeros(shape), np.zeros(shape)
        # find the 2 components where coordinates are bigger / smaller than the line
        # to apply normal vector in the correct direction
        components, n_components = label(np.logical_not(line_mask).view('uint8'))
        assert n_components == 2, "%i" % n_components
        neg_val = components[0, 0] if fixed_x else components[-1, -1]
        pos_val = components[-1, -1] if fixed_x else components[0, 0]
        flow_x[components == pos_val] = self.deformation_strength * normal_vector[1]
        flow_y[components == pos_val] = self.deformation_strength * normal_vector[0]
        flow_x[components == neg_val] = - self.deformation_strength * normal_vector[1]
        flow_y[components == neg_val] = - self.deformation_strength * normal_vector[0]
        # generate the flow fields
        flow_x, flow_y = (x + flow_x).reshape(-1, 1), (y + flow_y).reshape(-1, 1)
        # dilate the line mask
        line_mask = binary_dilation(line_mask, iterations=10)
        return flow_x, flow_y, line_mask
|
[
"numpy.zeros_like",
"gunpowder.batch_request.BatchRequest",
"scipy.ndimage.morphology.binary_dilation",
"numpy.logical_not",
"numpy.zeros",
"skimage.draw.line",
"numpy.clip",
"scipy.ndimage.interpolation.map_coordinates",
"random.random",
"gunpowder.coordinate.Coordinate",
"numpy.random.randint",
"random.seed",
"numpy.array",
"numpy.linalg.norm",
"numpy.arange",
"logging.getLogger"
] |
[((404, 431), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (421, 431), False, 'import logging\n'), ((3550, 3582), 'random.seed', 'random.seed', (['request.random_seed'], {}), '(request.random_seed)\n', (3561, 3582), False, 'import random\n'), ((3598, 3612), 'gunpowder.batch_request.BatchRequest', 'BatchRequest', ([], {}), '()\n', (3610, 3612), False, 'from gunpowder.batch_request import BatchRequest\n'), ((10997, 11026), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': '"""bool"""'}), "(shape, dtype='bool')\n", (11005, 11026), True, 'import numpy as np\n'), ((11044, 11064), 'skimage.draw.line', 'line', (['x0', 'y0', 'x1', 'y1'], {}), '(x0, y0, x1, y1)\n', (11048, 11064), False, 'from skimage.draw import line\n'), ((11258, 11303), 'numpy.array', 'np.array', (['[x1 - x0, y1 - y0]'], {'dtype': '"""float32"""'}), "([x1 - x0, y1 - y0], dtype='float32')\n", (11266, 11303), True, 'import numpy as np\n'), ((11327, 11354), 'numpy.linalg.norm', 'np.linalg.norm', (['line_vector'], {}), '(line_vector)\n', (11341, 11354), True, 'import numpy as np\n'), ((11430, 11456), 'numpy.zeros_like', 'np.zeros_like', (['line_vector'], {}), '(line_vector)\n', (11443, 11456), True, 'import numpy as np\n'), ((12666, 12707), 'scipy.ndimage.morphology.binary_dilation', 'binary_dilation', (['line_mask'], {'iterations': '(10)'}), '(line_mask, iterations=10)\n', (12681, 12707), False, 'from scipy.ndimage.morphology import binary_dilation\n'), ((4656, 4671), 'random.random', 'random.random', ([], {}), '()\n', (4669, 4671), False, 'import random\n'), ((10595, 10610), 'random.random', 'random.random', ([], {}), '()\n', (10608, 10610), False, 'import random\n'), ((11595, 11614), 'numpy.arange', 'np.arange', (['shape[1]'], {}), '(shape[1])\n', (11604, 11614), True, 'import numpy as np\n'), ((11616, 11635), 'numpy.arange', 'np.arange', (['shape[0]'], {}), '(shape[0])\n', (11625, 11635), True, 'import numpy as np\n'), ((11698, 11713), 'numpy.zeros', 'np.zeros', 
(['shape'], {}), '(shape)\n', (11706, 11713), True, 'import numpy as np\n'), ((11715, 11730), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (11723, 11730), True, 'import numpy as np\n'), ((10660, 10694), 'numpy.random.randint', 'np.random.randint', (['(1)', '(shape[1] - 2)'], {}), '(1, shape[1] - 2)\n', (10677, 10694), True, 'import numpy as np\n'), ((10730, 10764), 'numpy.random.randint', 'np.random.randint', (['(1)', '(shape[1] - 2)'], {}), '(1, shape[1] - 2)\n', (10747, 10764), True, 'import numpy as np\n'), ((10800, 10834), 'numpy.random.randint', 'np.random.randint', (['(1)', '(shape[0] - 2)'], {}), '(1, shape[0] - 2)\n', (10817, 10834), True, 'import numpy as np\n'), ((10859, 10893), 'numpy.random.randint', 'np.random.randint', (['(1)', '(shape[0] - 2)'], {}), '(1, shape[0] - 2)\n', (10876, 10893), True, 'import numpy as np\n'), ((11916, 11941), 'numpy.logical_not', 'np.logical_not', (['line_mask'], {}), '(line_mask)\n', (11930, 11941), True, 'import numpy as np\n'), ((7868, 7882), 'gunpowder.batch_request.BatchRequest', 'BatchRequest', ([], {}), '()\n', (7880, 7882), False, 'from gunpowder.batch_request import BatchRequest\n'), ((9534, 9560), 'numpy.clip', 'np.clip', (['section', '(0.0)', '(1.0)'], {}), '(section, 0.0, 1.0)\n', (9541, 9560), True, 'import numpy as np\n'), ((7936, 7961), 'gunpowder.coordinate.Coordinate', 'Coordinate', (['section.shape'], {}), '(section.shape)\n', (7946, 7961), False, 'from gunpowder.coordinate import Coordinate\n'), ((8065, 8090), 'gunpowder.coordinate.Coordinate', 'Coordinate', (['section.shape'], {}), '(section.shape)\n', (8075, 8090), False, 'from gunpowder.coordinate import Coordinate\n'), ((9297, 9382), 'scipy.ndimage.interpolation.map_coordinates', 'map_coordinates', (['section', '(flow_y, flow_x)'], {'mode': '"""constant"""', 'order': 'interpolation'}), "(section, (flow_y, flow_x), mode='constant', order=interpolation\n )\n", (9312, 9382), False, 'from scipy.ndimage.interpolation import map_coordinates\n')]
|
# -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import HttpResponse
from django.shortcuts import render
from django.apps import apps
from municipios.forms import FormMunicipio
def base_url_js(request):
    """Serve a tiny JS snippet exposing the municipios base URL to the page."""
    base_url = reverse('municipios-base-url')
    return HttpResponse(u"var __municipios_base_url__ = '%s';" % base_url)
def municipios_ajax(request, uf, app_label, object_name):
    """Render the <option> list of municipios of the given UF, sorted by name."""
    model_cls = apps.get_model(app_label, object_name)
    municipios = model_cls.objects.filter(Q(uf=uf)).order_by('nome')
    context = {"municipio_list": municipios}
    return render(request, "municipios/municipios_options.html", context)
def teste(request):
    """Render the test page with a FormMunicipio bound to any GET data."""
    context = {'form': FormMunicipio(request.GET or None)}
    return render(request, 'municipios/teste.html', context)
|
[
"django.apps.apps.get_model",
"django.core.urlresolvers.reverse",
"django.db.models.Q",
"django.shortcuts.render",
"municipios.forms.FormMunicipio"
] |
[((447, 485), 'django.apps.apps.get_model', 'apps.get_model', (['app_label', 'object_name'], {}), '(app_label, object_name)\n', (461, 485), False, 'from django.apps import apps\n'), ((571, 664), 'django.shortcuts.render', 'render', (['request', '"""municipios/municipios_options.html"""', "{'municipio_list': municipio_list}"], {}), "(request, 'municipios/municipios_options.html', {'municipio_list':\n municipio_list})\n", (577, 664), False, 'from django.shortcuts import render\n'), ((725, 759), 'municipios.forms.FormMunicipio', 'FormMunicipio', (['(request.GET or None)'], {}), '(request.GET or None)\n', (738, 759), False, 'from municipios.forms import FormMunicipio\n'), ((771, 827), 'django.shortcuts.render', 'render', (['request', '"""municipios/teste.html"""', "{'form': form}"], {}), "(request, 'municipios/teste.html', {'form': form})\n", (777, 827), False, 'from django.shortcuts import render\n'), ((339, 369), 'django.core.urlresolvers.reverse', 'reverse', (['"""municipios-base-url"""'], {}), "('municipios-base-url')\n", (346, 369), False, 'from django.core.urlresolvers import reverse\n'), ((533, 541), 'django.db.models.Q', 'Q', ([], {'uf': 'uf'}), '(uf=uf)\n', (534, 541), False, 'from django.db.models import Q\n')]
|
import sys
import subprocess
from setuptools import find_packages, setup
def gdal_warning(msg=None):
    """Abort with a descriptive error when gdal-config cannot be queried."""
    message = f"Unable to determine gdal version using gdal-config: {msg}"
    raise Exception(message)
def libgdal_version():
    """Return the system libgdal version (via gdal-config) as a pin like '3.4.1.*'."""
    version = None
    try:
        proc = subprocess.Popen(['gdal-config', '--version'], stdout=subprocess.PIPE)
        raw = proc.stdout.read()
        version = raw.decode('utf-8').rstrip()
        if not version:
            # empty output: gdal-config ran but produced nothing usable
            gdal_warning("Version not set")
    except Exception as err:
        gdal_warning(err)
    return f"{version}.*"
# Runtime dependencies. pygdal must be pinned to the exact libgdal version
# installed on the system, so the pin is computed at import time from
# `gdal-config --version` via libgdal_version().
requires = ["numpy", "Cartopy", "richdem",
            "matplotlib", "elevation", "click<7", "scipy",
            "pygdal=="+libgdal_version()]
setup(
    name='src',
    packages=find_packages(),
    version='0.1.0',
    description='Automated method to extract river profiles using GRASS GIS',
    author='<NAME>',
    author_email='<EMAIL>',
    url="https://grass-gis-to-extract-river-profiles.readthedocs.io",
    license='MIT',
    scripts=['bin/extract-rivers'],
    # Console entry point for the DEM visualisation helper.
    entry_points={
        "console_scripts": [
            "visualise = grass_river_extraction_tools.visualise_dem:main"
        ]
    },
    install_requires=requires,
    python_requires='~=3.6'
)
|
[
"subprocess.Popen",
"setuptools.find_packages"
] |
[((253, 323), 'subprocess.Popen', 'subprocess.Popen', (["['gdal-config', '--version']"], {'stdout': 'subprocess.PIPE'}), "(['gdal-config', '--version'], stdout=subprocess.PIPE)\n", (269, 323), False, 'import subprocess\n'), ((714, 729), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (727, 729), False, 'from setuptools import find_packages, setup\n')]
|
#wgs report
__author__ = '<NAME>'
#TODO checks for metrics
# write report
# write Ssheet
import csv
import os
import sys
import glob
import datetime
import argparse
import subprocess
from string import Template
# CLI: -nod turns off creation of the data-transfer directories.
parser = argparse.ArgumentParser()
parser.add_argument('-nod', help='Turn off directory creation', action='store_true')
args = parser.parse_args()
def is_number(s):
    """Return True when *s* parses as a float, False otherwise."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def data_dir_check(dir_list, woid, date):
    """Create and return the transfer directory for the first existing data
    directory whose path contains 'model'; return 'NA' when none is found,
    when the transfer directory already exists, or when it cannot be made."""
    for directory in dir_list:
        # only consider existing directories whose path mentions 'model'
        if not (os.path.isdir(directory) and 'model' in directory):
            continue
        parts = directory.split('/')
        for idx, part in enumerate(parts):
            if 'model' not in part:
                continue
            # path up to (and including) the first 'model' component
            model_directory = '/'.join(parts[:idx + 1]) + '/'
            transfer_dir = os.path.join(
                model_directory, 'data_transfer/{}_{}/'.format(woid, date))
            if os.path.isdir(transfer_dir):
                print('Transfer Directory already exists: {}'.format(transfer_dir))
                return 'NA'
            if os.path.isdir(model_directory) and not os.path.isdir(transfer_dir):
                try:
                    os.mkdir(transfer_dir)
                except OSError:
                    # parent missing or not writable: give up on this run
                    return 'NA'
            print('Data transfer directory created:\n{}'.format(transfer_dir))
            return transfer_dir
    return 'NA'
# Date stamp (MMDDYY) used to tag transfer directories.
mm_dd_yy = datetime.datetime.now().strftime("%m%d%y")
#Check for metrics file;
if not glob.glob('*.cwl.metrics.*.tsv'):
    sys.exit('cwl.metrics file not found')
else:
    # note: the original applied a no-op .format(mm_dd_yy) to this pattern
    metrics_files = glob.glob('*.cwl.metrics.*.tsv')
#Check, open, and create template file using Template;
if not os.path.isfile('/gscmnt/gc2783/qc/GMSworkorders/reports/wgs_results_template_file.txt'):
    sys.exit('Template file not found.')
with open('/gscmnt/gc2783/qc/GMSworkorders/reports/wgs_results_template_file.txt', 'r', encoding='utf-8') as fh:
    template = fh.read()
    template_file = Template(template)
# Metrics with hard QC thresholds, and the full list averaged in the report.
metrics_tracked = ['HAPLOID COVERAGE', 'discordant_rate', 'inter-chromosomal_Pairing rate',
                   'FREEMIX', 'FOP: PF_MISMATCH_RATE', 'SOP: PF_MISMATCH_RATE']
totals_list = ['HAPLOID COVERAGE', 'discordant_rate', 'inter-chromosomal_Pairing rate', 'FREEMIX',
               'FOP: PF_MISMATCH_RATE','SOP: PF_MISMATCH_RATE', 'MEAN_INSERT_SIZE', 'STANDARD_DEVIATION',
               'PCT_ADAPTER', 'PCT_20X','PCT_30X','PF_ALIGNED_BASES', 'PERCENT_DUPLICATION']
for file in metrics_files:
    file_name = file.split('.')[0]
    file_date = file.split('.')[-2]
    SSheet_outfile = '{}.cwl.results.{}.tsv'.format(file_name, file_date)
    report_outfile = '{}.cwl.report.{}.txt'.format(file_name, file_date)
    # Ini. dicts
    prnt_report = False
    template_file_dict = {}
    totals_dict = {}
    tot_cnt_dict = {}
    data_directories = []
    print('Confluence link: \nhttps://confluence.ris.wustl.edu/pages/viewpage.action?spaceKey=AD&title=WorkOrder+{}'.format(file_name))
    # Prompt until a positive numeric haploid-coverage threshold is entered.
    while True:
        hap_in = input('Please enter Haploid Coverage value for {}: '.format(file_name))
        if is_number(hap_in) and float(hap_in) > 0:
            hap_value = float(hap_in)
            break
        else:
            print('Please enter a positive number for Haploid Coverage. ')
    # Optionally collect free-text sequencing notes (terminated by a 'q' line).
    while True:
        seq_in = input('\nWould you like to add a SEQUENCING_NOTE? y/n: ')
        if seq_in == 'y':  # BUGFIX: was "is 'y'" (identity, not equality)
            seq_notes = []
            while True:
                note_line = input()
                if note_line != 'q':
                    seq_notes.append(note_line)
                else:
                    break
            break
        elif seq_in == 'n':  # BUGFIX: was "is 'n'"
            seq_notes = ['']
            print('Skipping SEQUENCING_NOTE')
            break
        else:
            print('Please enter y or n')
    for total in totals_list:
        totals_dict[total] = 0
    for metric in metrics_tracked:
        template_file_dict[metric] = 0
    for total in totals_list:
        tot_cnt_dict[total] = 0
    # Metrics File Open, Check Metrics, Generate 'results', Get Totals;
    with open(file, 'r') as fh, open(SSheet_outfile, 'w') as of:
        metrics_dict = csv.DictReader(fh, delimiter='\t')
        header = metrics_dict.fieldnames
        ofd = csv.DictWriter(of, fieldnames=header, delimiter='\t')
        header.extend(['QC_Status','QC_failed_metrics'])
        ofd.writeheader()
        last_succeeded_build_id = []
        #ini totals variables
        count = 0
        pass_count = 0
        fail_count = 0
        for line in metrics_dict:
            line['QC_failed_metrics'] = ''
            failed_metrics = []
            template_file_dict['WOID'] = line['WorkOrder']
            data_directories.append(line['data_directory'])
            #Check metrics...
            met_to_check = []
            met_not_check = []
            for met in metrics_tracked:
                if met in line and is_number(line[met]):
                    met_to_check.append(met)
                    prnt_report = True
                else:
                    met_not_check.append(met)
            if 'HAPLOID COVERAGE' in met_to_check and float(line['HAPLOID COVERAGE']) < float(hap_value):
                failed_metrics.append('HAPLOID COVERAGE')
                template_file_dict['HAPLOID COVERAGE'] += 1
            if 'discordant_rate' in met_to_check and float(line['discordant_rate']) > 5:
                failed_metrics.append('discordant_rate')
                template_file_dict['discordant_rate'] += 1
            if 'inter-chromosomal_Pairing rate' in met_to_check and float(line['inter-chromosomal_Pairing rate']) > 0.05:
                failed_metrics.append('inter-chromosomal_Pairing rate')
                template_file_dict['inter-chromosomal_Pairing rate'] += 1
            if 'FREEMIX' in met_to_check and float(line['FREEMIX']) > 0.05:
                failed_metrics.append('FREEMIX')
                template_file_dict['FREEMIX'] += 1
            if 'FOP: PF_MISMATCH_RATE' in met_to_check and float(line['FOP: PF_MISMATCH_RATE']) > 0.05:
                failed_metrics.append('FOP: PF_MISMATCH_RATE')
                template_file_dict['FOP: PF_MISMATCH_RATE'] += 1
            if 'SOP: PF_MISMATCH_RATE' in met_to_check and float(line['SOP: PF_MISMATCH_RATE']) > 0.05:
                failed_metrics.append('SOP: PF_MISMATCH_RATE')
                template_file_dict['SOP: PF_MISMATCH_RATE'] += 1
            count += 1
            # A sample only PASSes/FAILs when every tracked metric was present.
            if len(met_to_check) != len(metrics_tracked):
                line['QC_Status'] = 'NA'
                line['QC_failed_metrics'] = ','.join(failed_metrics)
            elif len(failed_metrics) > 0:
                line['QC_Status'] = 'FAIL'
                line['QC_failed_metrics'] = ','.join(failed_metrics)
                fail_count += 1
            else:
                line['QC_Status'] = 'PASS'
                line['QC_failed_metrics'] = 'NA'
                pass_count += 1
            for total in totals_list:
                if total in line and is_number(line[total]):
                    totals_dict[total] += float(line[total])
                    tot_cnt_dict[total] += 1
            last_succeeded_build_id.append(line['last_succeeded_build'])
            ofd.writerow(line)
        # Per-metric averages over the samples that reported the metric.
        avg_dict = {}
        for total in totals_list:
            if totals_dict[total] != 0:
                avg_dict[total] = totals_dict[total] / tot_cnt_dict[total]
            else:
                avg_dict[total] = 'NA'
    if prnt_report:
        for metric in metrics_tracked:
            # BUGFIX: was "is 0" (identity comparison with an int literal)
            if template_file_dict[metric] == 0 and pass_count + fail_count != count:
                template_file_dict[metric] = 'NA'
        #set unchecked metrics to NA
        #print missing metric
        ##print report
        transfer_data_directory = 'NA'
        if not args.nod:
            transfer_data_directory = data_dir_check(data_directories, template_file_dict['WOID'], mm_dd_yy)
        with open(report_outfile, 'w', encoding='utf-8') as fhr:
            fhr.write(template_file.substitute(WOID = template_file_dict['WOID'],
                                               HAP_IN = hap_in,
                                               SEQUENCING_NOTE = '\n'.join(seq_notes),
                                               SAMPLE_NUMBER = count,
                                               PASS_SAMPLES = pass_count,
                                               FAIL = fail_count,
                                               HAP_FAIL_COUNT = template_file_dict['HAPLOID COVERAGE'],
                                               DIS_RT_FAIL_COUNT = template_file_dict['discordant_rate'],
                                               INTER_CHR_FAIL_COUNT = template_file_dict['inter-chromosomal_Pairing rate'],
                                               FREE_FAIL_COUNT = template_file_dict['FREEMIX'],
                                               FOP_FAIL_COUNT = template_file_dict['FOP: PF_MISMATCH_RATE'],
                                               SOP_FAIL_COUNT = template_file_dict['SOP: PF_MISMATCH_RATE'],
                                               HAPLOID_COVERAGE = avg_dict['HAPLOID COVERAGE'],
                                               discordant_rate = avg_dict['discordant_rate'],
                                               inter_chromosomal_Pairing_rate = avg_dict['inter-chromosomal_Pairing rate'],
                                               FREEMIX = avg_dict['FREEMIX'],
                                               FOP_PF_MISMATCH_RATE = avg_dict['FOP: PF_MISMATCH_RATE'],
                                               SOP_PF_MISMATCH_RATE= avg_dict['SOP: PF_MISMATCH_RATE'],
                                               MEAN_INSERT_SIZE = avg_dict['MEAN_INSERT_SIZE'],
                                               STANDARD_DEVIATION = avg_dict['STANDARD_DEVIATION'],
                                               PCT_ADAPTER = avg_dict['PCT_ADAPTER'],
                                               PCT_20X = avg_dict['PCT_20X'],
                                               PCT_30X = avg_dict['PCT_30X'],
                                               PF_ALIGNED_BASES = avg_dict['PF_ALIGNED_BASES'],
                                               PERCENT_DUPLICATION = avg_dict['PERCENT_DUPLICATION'],
                                               TRANSFER_DIR=transfer_data_directory,
                                               RESULTS_SPREADSHEET = SSheet_outfile))
        print('Report generated for {}'.format(file_name))
        print('-----------------------')
        builds = ','.join(last_succeeded_build_id)
        # Companion helper file with ready-to-run transfer commands.
        with open('{}.Data_transfer_help.{}.txt'.format(template_file_dict['WOID'], file_date), 'w') as df:
            df.write('Data Transfer Directory ={td}\ncd to parent data dir\ncd to model_data'
                     '\nmkdir data_transfer/{w}\nTransfer Commands:\n\ngenome model cwl-pipeline prep-for-transfer --md5sum'
                     ' --directory={td} --builds {b}\n\n'
                     'genome model cwl-pipeline prep-for-transfer --md5sum'
                     ' --directory={td} model_groups.project.id={w}\n'.format(td=transfer_data_directory, w=template_file_dict['WOID'], b=builds,))
    else:
        print('No report generated for {}; No required metrics found.'.format(file_name))
        print('-----------------------------------------------------')
|
[
"os.mkdir",
"argparse.ArgumentParser",
"os.path.isdir",
"csv.DictReader",
"string.Template",
"os.path.isfile",
"glob.glob",
"datetime.datetime.now",
"sys.exit",
"csv.DictWriter"
] |
[((240, 265), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (263, 265), False, 'import argparse\n'), ((1971, 2003), 'glob.glob', 'glob.glob', (['"""*.cwl.metrics.*.tsv"""'], {}), "('*.cwl.metrics.*.tsv')\n", (1980, 2003), False, 'import glob\n'), ((2009, 2047), 'sys.exit', 'sys.exit', (['"""cwl.metrics file not found"""'], {}), "('cwl.metrics file not found')\n", (2017, 2047), False, 'import sys\n'), ((2187, 2279), 'os.path.isfile', 'os.path.isfile', (['"""/gscmnt/gc2783/qc/GMSworkorders/reports/wgs_results_template_file.txt"""'], {}), "(\n '/gscmnt/gc2783/qc/GMSworkorders/reports/wgs_results_template_file.txt')\n", (2201, 2279), False, 'import os\n'), ((2280, 2316), 'sys.exit', 'sys.exit', (['"""Template file not found."""'], {}), "('Template file not found.')\n", (2288, 2316), False, 'import sys\n'), ((2476, 2494), 'string.Template', 'Template', (['template'], {}), '(template)\n', (2484, 2494), False, 'from string import Template\n'), ((1895, 1918), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1916, 1918), False, 'import datetime\n'), ((4694, 4728), 'csv.DictReader', 'csv.DictReader', (['fh'], {'delimiter': '"""\t"""'}), "(fh, delimiter='\\t')\n", (4708, 4728), False, 'import csv\n'), ((4785, 4838), 'csv.DictWriter', 'csv.DictWriter', (['of'], {'fieldnames': 'header', 'delimiter': '"""\t"""'}), "(of, fieldnames=header, delimiter='\\t')\n", (4799, 4838), False, 'import csv\n'), ((807, 831), 'os.path.isdir', 'os.path.isdir', (['directory'], {}), '(directory)\n', (820, 831), False, 'import os\n'), ((1207, 1234), 'os.path.isdir', 'os.path.isdir', (['transfer_dir'], {}), '(transfer_dir)\n', (1220, 1234), False, 'import os\n'), ((1388, 1418), 'os.path.isdir', 'os.path.isdir', (['model_directory'], {}), '(model_directory)\n', (1401, 1418), False, 'import os\n'), ((1427, 1454), 'os.path.isdir', 'os.path.isdir', (['transfer_dir'], {}), '(transfer_dir)\n', (1440, 1454), False, 'import os\n'), ((1513, 1535), 'os.mkdir', 
'os.mkdir', (['transfer_dir'], {}), '(transfer_dir)\n', (1521, 1535), False, 'import os\n')]
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import os
import yaml
from utils.times import run_time
from config.conf import cm
@run_time
def inspect_element():
    """Audit every locator entry in the element YAML files.

    Walks all files under ``cm.ELEMENT_PATH``; each YAML value must look
    like ``pattern==value`` where the pattern is one of ``cm.LOCATE_MODE``
    and the value is consistent with that pattern (an xpath contains
    ``//``, a css selector does not, and id/name/class are non-empty).
    Raises AttributeError for an unknown pattern; inconsistent values
    trip an assertion.
    """
    for file_name in os.listdir(cm.ELEMENT_PATH):
        file_path = os.path.join(cm.ELEMENT_PATH, file_name)
        # Skip sub-directories and anything that is not a regular file.
        if not os.path.isfile(file_path):
            continue
        with open(file_path, encoding='utf-8') as fh:
            entries = yaml.safe_load(fh)
        for entry in entries.values():
            locate_mode, locator = entry.split('==')
            if locate_mode not in cm.LOCATE_MODE:
                raise AttributeError('【%s】路径中【%s]元素没有指定类型' % (file_name, entry))
            if locate_mode == 'xpath':
                assert '//' in locator, '【%s】路径中【%s]元素xpath类型与值不配' % (
                    file_name, entry)
            if locate_mode == 'css':
                assert '//' not in locator, '【%s】路径中【%s]元素css类型与值不配' % (
                    file_name, entry)
            if locate_mode in ('id', 'name', 'class'):
                assert locator, '【%s】路径中【%s]元素类型与值不匹配' % (file_name, entry)
if __name__ == '__main__':
    # Run the element-file audit when this module is executed directly.
    inspect_element()
|
[
"os.path.isfile",
"os.path.join",
"os.listdir",
"yaml.safe_load"
] |
[((198, 225), 'os.listdir', 'os.listdir', (['cm.ELEMENT_PATH'], {}), '(cm.ELEMENT_PATH)\n', (208, 225), False, 'import os\n'), ((243, 275), 'os.path.join', 'os.path.join', (['cm.ELEMENT_PATH', 'i'], {}), '(cm.ELEMENT_PATH, i)\n', (255, 275), False, 'import os\n'), ((287, 308), 'os.path.isfile', 'os.path.isfile', (['_path'], {}), '(_path)\n', (301, 308), False, 'import os\n'), ((386, 403), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (400, 403), False, 'import yaml\n')]
|
import numpy as np
import scipy.ndimage as ndimage
import matplotlib.pyplot as plt
from skimage import data, color, img_as_float
from tkinter import *
from PIL import Image
from graph_cut import GraphCut
from graph_cut_gui import GraphCutGui
class GraphCutController:
    """Controller for interactive graph-cut image segmentation.

    Builds color models from user scribbles (foreground/background
    seeds), assembles unary and pairwise terms, solves the min-cut via
    ``GraphCut`` and renders the result back into the ``GraphCutGui``
    view.
    """

    def __init__(self):
        self.__init_view()

    def __init_view(self):
        # Create the Tk window and start the GUI event loop; mainloop()
        # blocks until the window is closed.
        root = Tk()
        root.geometry("700x500")
        self._view = GraphCutGui(self, root)
        root.mainloop()

    def __get_color_histogram(self, image, seed, hist_res):
        """
        Compute a color histogram based on selected points from an image.

        :param image: color image (H x W x 3, channel values in 0..255)
        :param seed: Nx2 matrix containing the (x, y) positions of the
            pixels used to compute the color histogram
        :param hist_res: number of bins per color channel
        :return: smoothed color histogram, normalized to sum to 1
        """
        seed_r_values = image[seed[:, 1], seed[:, 0], 0]
        seed_g_values = image[seed[:, 1], seed[:, 0], 1]
        seed_b_values = image[seed[:, 1], seed[:, 0], 2]
        data = np.transpose(np.vstack((seed_r_values, seed_g_values, seed_b_values)))
        histogram, _ = np.histogramdd(data, hist_res, range=[(0, 255), (0, 255), (0, 255)])
        # Smooth so colors similar to the seeds also get support.
        # w = 2*int(truncate*sigma + 0.5) + 1, truncate=4 is scipy's default.
        # NOTE(review): an earlier comment quoted sigma=0.65 (MATLAB default)
        # but the code uses 0.85 -- kept as-is.
        smoothed_histogram = ndimage.gaussian_filter(histogram, 0.85)
        normalized_smoothed_histogram = smoothed_histogram / np.sum(smoothed_histogram.ravel())
        return normalized_smoothed_histogram

    def __get_unaries(self, image, lambda_param, hist_fg, hist_bg, seed_fg, seed_bg):
        """
        Compute per-pixel unary (data) costs from the color histograms.

        :param image: color image as a numpy array
        :param lambda_param: lambda as set by the user
        :param hist_fg: foreground color histogram (32 bins per channel)
        :param hist_bg: background color histogram (32 bins per channel)
        :param seed_fg: pixels marked as foreground by the user
        :param seed_bg: pixels marked as background by the user
        :return: Nx2 numpy array containing the unary cost for every pixel
            in the image (column 0: foreground, column 1: background)
        """
        print("Calcuating unaries...")
        # Bin width matching the 32-bin histograms built in segment_image().
        hist_step = 255.0 / 32.0
        image_rows = np.size(image, 0)
        image_cols = np.size(image, 1)
        unaries = np.empty((image_rows, image_cols, 2))
        for i in range(0, image_rows):
            for j in range(0, image_cols):
                pixel = image[i, j, :]
                pixel_bins = np.floor(pixel / hist_step).astype(int)
                # A channel value of exactly 255 lands in bin 32; clamp it.
                pixel_bins[pixel_bins == 32] = 31
                # Negative log-likelihood; 1e-10 guards against log(0).
                cost_fg = -np.log(hist_fg[pixel_bins[0], pixel_bins[1], pixel_bins[2]] + 1e-10)
                cost_bg = -np.log(hist_bg[pixel_bins[0], pixel_bins[1], pixel_bins[2]] + 1e-10)
                unaries[i, j, 1] = lambda_param * cost_bg
                unaries[i, j, 0] = lambda_param * cost_fg
        # Hard constraints: seeded pixels are forced to their label by
        # putting infinite cost on the opposite assignment.
        for j, i in seed_fg:
            unaries[i, j, 1] = np.inf
            unaries[i, j, 0] = 0
        for j, i in seed_bg:
            unaries[i, j, 1] = 0
            unaries[i, j, 0] = np.inf
        unariesN = np.reshape(unaries, (-1, 2))
        return unariesN

    def __get_pairwise(self, image, sigma):
        """
        Get pairwise terms for each pair of 8-connected pixels.

        :param image: color image as a numpy array
        :param sigma: ad-hoc cost function parameter
        :return: pairwise: list of [i, j, e00, e01, e10, e11] rows with the
            pairwise costs for the image (only e01 is non-zero)
        """
        def get_neighbours(i, j, image_rows, image_cols):
            # All 8 neighbours of (i, j), clipped to the image bounds.
            neighbours = np.array([[i - 1, j - 1],  # upper left
                                   [i - 1, j],  # upper
                                   [i - 1, j + 1],  # upper right
                                   [i, j + 1],  # right
                                   [i + 1, j + 1],  # lower right
                                   [i + 1, j],  # lower
                                   [i + 1, j - 1],  # lower left
                                   [i, j - 1]])  # left
            is_boundary_1 = 0 <= neighbours[:, 0]
            is_boundary_2 = image_rows > neighbours[:, 0]
            is_boundary_3 = 0 <= neighbours[:, 1]
            is_boundary_4 = image_cols > neighbours[:, 1]
            valid = np.logical_and(np.logical_and(is_boundary_1, is_boundary_2), np.logical_and(is_boundary_3, is_boundary_4))
            return neighbours[valid, :]

        print("Calcuating pairwises...")
        image_rows = np.size(image, 0)
        image_cols = np.size(image, 1)
        pairwise = []
        for i in range(0, image_rows):
            for j in range(0, image_cols):
                current_coordinates = np.array([i, j])
                current_index = i * image_cols + j
                current_pixel = image[i, j].astype(float)
                neighbour_coordinates = get_neighbours(i, j, image_rows, image_cols)
                neighbour_indices = neighbour_coordinates[:, 0] * image_cols + neighbour_coordinates[:, 1]
                neighbour_pixels = image[neighbour_coordinates[:, 0], neighbour_coordinates[:, 1]].astype(float)
                pixel_differences = np.subtract(neighbour_pixels, current_pixel)
                pixel_distances = np.linalg.norm(pixel_differences, axis=1)
                spatial_differences = current_coordinates - neighbour_coordinates
                spatial_differences = np.linalg.norm(spatial_differences, axis=1)
                # Contrast-sensitive cost: cheap to cut across strong color
                # edges, expensive across uniform regions; divided by the
                # spatial distance to down-weight diagonal neighbours.
                neighbour_costs = np.divide(np.exp(-np.square(pixel_distances) / (2 * np.square(sigma))),
                                            spatial_differences)
                for k in range(0, np.size(neighbour_indices.ravel())):
                    neighbour_index = neighbour_indices[k]
                    cost = neighbour_costs[k]
                    pairwise.append([current_index, neighbour_index, 0, cost, 0, 0])
                if current_index % 1000 == 0:
                    print(current_index, '/', image_rows * image_cols)
        pairwise = np.asarray(pairwise)
        return pairwise

    def __get_segmented_image(self, image, labels, background=None):
        """
        Return a segmented image, as well as an image with new background.

        :param image: color image as a numpy array
        :param labels: per-pixel labels as a numpy array (True = background)
        :param background: color image as a numpy array, or None
        :return image_segmented: image with red foreground, blue background
        :return image_with_background: image with changed background if one
            was given (None otherwise)
        """
        image_rows = np.size(image, 0)
        image_cols = np.size(image, 1)
        not_labels = np.logical_not(labels)
        # Overlay: red where foreground, blue where background.
        mask = np.zeros((image_rows, image_cols, 3), dtype=np.uint8)
        mask[not_labels, :] = np.array([255, 0, 0], dtype=np.uint8)
        mask[labels, :] = np.array([0, 0, 255])
        image_PIL = Image.fromarray(image)
        mask_PIL = Image.fromarray(mask)
        result_PIL = Image.blend(image_PIL, mask_PIL, 0.6)
        segmented_image = np.array(result_PIL)
        if background is not None:
            # Copy the (cropped) background and paste the foreground pixels
            # over it.  NOTE: dead code that built an unused boolean mask
            # with the np.bool alias (removed in NumPy 1.24, where it raised
            # AttributeError) has been dropped here.
            result = np.copy(background[0:image_rows, 0:image_cols, :])
            result.setflags(write=1)
            result[not_labels, 0:3] = image[not_labels, 0:3]
            segmented_image_with_background = result
        else:
            segmented_image_with_background = None
        return segmented_image, segmented_image_with_background

    def segment_image(self, image, seed_fg, seed_bg, lambda_value, background=None):
        """Run the full pipeline: histograms, unaries, pairwise, min-cut,
        and display of the segmented result in the view.

        :param image: PIL image to segment
        :param seed_fg: foreground scribble pixels (list of (x, y))
        :param seed_bg: background scribble pixels (list of (x, y))
        :param lambda_value: weight of the unary terms
        :param background: optional PIL replacement background
        """
        image_array = np.asarray(image)
        background_array = None
        if background:
            background = background.convert("RGB")
            background_array = np.asarray(background)
        seed_fg = np.array(seed_fg)
        seed_bg = np.array(seed_bg)
        height, width = np.shape(image_array)[0:2]
        num_pixels = height * width
        # Color histograms for the unaries (32 bins per channel).
        hist_res = 32
        cost_fg = self.__get_color_histogram(image_array, seed_fg, hist_res)
        cost_bg = self.__get_color_histogram(image_array, seed_bg, hist_res)
        # Unary and pairwise terms.
        unaries = self.__get_unaries(image_array, lambda_value, cost_fg, cost_bg, seed_fg, seed_bg)
        pairwise = self.__get_pairwise(image_array, sigma=5)
        # Perform the graph cut.
        g = GraphCut(num_pixels, pairwise.__len__())
        g.set_unary(unaries)
        g.set_pairwise(pairwise)
        g.minimize()
        labels = g.get_labeling()
        labels = np.reshape(labels, (height, width))
        # Render the segmentation into the view.
        segmented_image, segmented_image_with_background = self.__get_segmented_image(image_array, labels, background_array)
        # Transform the image array back to an RGB image.
        segmented_image = Image.fromarray(segmented_image, 'RGB')
        self._view.set_canvas_image(segmented_image)
        if segmented_image_with_background is not None:
            segmented_image_with_background = Image.fromarray(segmented_image_with_background, 'RGB')
            plt.imshow(segmented_image_with_background)
            plt.show()
|
[
"numpy.empty",
"numpy.floor",
"numpy.histogramdd",
"numpy.shape",
"numpy.linalg.norm",
"PIL.Image.blend",
"numpy.copy",
"scipy.ndimage.gaussian_filter",
"matplotlib.pyplot.imshow",
"numpy.logical_not",
"numpy.reshape",
"numpy.bool",
"numpy.size",
"matplotlib.pyplot.show",
"numpy.asarray",
"numpy.square",
"numpy.vstack",
"graph_cut_gui.GraphCutGui",
"numpy.subtract",
"numpy.logical_and",
"numpy.log",
"numpy.zeros",
"numpy.array",
"PIL.Image.fromarray"
] |
[((425, 448), 'graph_cut_gui.GraphCutGui', 'GraphCutGui', (['self', 'root'], {}), '(self, root)\n', (436, 448), False, 'from graph_cut_gui import GraphCutGui\n'), ((1178, 1246), 'numpy.histogramdd', 'np.histogramdd', (['data', 'hist_res'], {'range': '[(0, 255), (0, 255), (0, 255)]'}), '(data, hist_res, range=[(0, 255), (0, 255), (0, 255)])\n', (1192, 1246), True, 'import numpy as np\n'), ((1429, 1469), 'scipy.ndimage.gaussian_filter', 'ndimage.gaussian_filter', (['histogram', '(0.85)'], {}), '(histogram, 0.85)\n', (1452, 1469), True, 'import scipy.ndimage as ndimage\n'), ((2348, 2365), 'numpy.size', 'np.size', (['image', '(0)'], {}), '(image, 0)\n', (2355, 2365), True, 'import numpy as np\n'), ((2387, 2404), 'numpy.size', 'np.size', (['image', '(1)'], {}), '(image, 1)\n', (2394, 2404), True, 'import numpy as np\n'), ((2424, 2461), 'numpy.empty', 'np.empty', (['(image_rows, image_cols, 2)'], {}), '((image_rows, image_cols, 2))\n', (2432, 2461), True, 'import numpy as np\n'), ((3234, 3262), 'numpy.reshape', 'np.reshape', (['unaries', '(-1, 2)'], {}), '(unaries, (-1, 2))\n', (3244, 3262), True, 'import numpy as np\n'), ((4644, 4661), 'numpy.size', 'np.size', (['image', '(0)'], {}), '(image, 0)\n', (4651, 4661), True, 'import numpy as np\n'), ((4683, 4700), 'numpy.size', 'np.size', (['image', '(1)'], {}), '(image, 1)\n', (4690, 4700), True, 'import numpy as np\n'), ((6166, 6186), 'numpy.asarray', 'np.asarray', (['pairwise'], {}), '(pairwise)\n', (6176, 6186), True, 'import numpy as np\n'), ((6804, 6821), 'numpy.size', 'np.size', (['image', '(0)'], {}), '(image, 0)\n', (6811, 6821), True, 'import numpy as np\n'), ((6843, 6860), 'numpy.size', 'np.size', (['image', '(1)'], {}), '(image, 1)\n', (6850, 6860), True, 'import numpy as np\n'), ((6883, 6905), 'numpy.logical_not', 'np.logical_not', (['labels'], {}), '(labels)\n', (6897, 6905), True, 'import numpy as np\n'), ((6921, 6974), 'numpy.zeros', 'np.zeros', (['(image_rows, image_cols, 3)'], {'dtype': 'np.uint8'}), 
'((image_rows, image_cols, 3), dtype=np.uint8)\n', (6929, 6974), True, 'import numpy as np\n'), ((7005, 7042), 'numpy.array', 'np.array', (['[255, 0, 0]'], {'dtype': 'np.uint8'}), '([255, 0, 0], dtype=np.uint8)\n', (7013, 7042), True, 'import numpy as np\n'), ((7069, 7090), 'numpy.array', 'np.array', (['[0, 0, 255]'], {}), '([0, 0, 255])\n', (7077, 7090), True, 'import numpy as np\n'), ((7112, 7134), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (7127, 7134), False, 'from PIL import Image\n'), ((7154, 7175), 'PIL.Image.fromarray', 'Image.fromarray', (['mask'], {}), '(mask)\n', (7169, 7175), False, 'from PIL import Image\n'), ((7197, 7234), 'PIL.Image.blend', 'Image.blend', (['image_PIL', 'mask_PIL', '(0.6)'], {}), '(image_PIL, mask_PIL, 0.6)\n', (7208, 7234), False, 'from PIL import Image\n'), ((7262, 7282), 'numpy.array', 'np.array', (['result_PIL'], {}), '(result_PIL)\n', (7270, 7282), True, 'import numpy as np\n'), ((7903, 7920), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (7913, 7920), True, 'import numpy as np\n'), ((8099, 8116), 'numpy.array', 'np.array', (['seed_fg'], {}), '(seed_fg)\n', (8107, 8116), True, 'import numpy as np\n'), ((8135, 8152), 'numpy.array', 'np.array', (['seed_bg'], {}), '(seed_bg)\n', (8143, 8152), True, 'import numpy as np\n'), ((8938, 8973), 'numpy.reshape', 'np.reshape', (['labels', '(height, width)'], {}), '(labels, (height, width))\n', (8948, 8973), True, 'import numpy as np\n'), ((9281, 9320), 'PIL.Image.fromarray', 'Image.fromarray', (['segmented_image', '"""RGB"""'], {}), "(segmented_image, 'RGB')\n", (9296, 9320), False, 'from PIL import Image\n'), ((1097, 1153), 'numpy.vstack', 'np.vstack', (['(seed_r_values, seed_g_values, seed_b_values)'], {}), '((seed_r_values, seed_g_values, seed_b_values))\n', (1106, 1153), True, 'import numpy as np\n'), ((3733, 3859), 'numpy.array', 'np.array', (['[[i - 1, j - 1], [i - 1, j], [i - 1, j + 1], [i, j + 1], [i + 1, j + 1], [i +\n 1, j], [i + 1, j - 
1], [i, j - 1]]'], {}), '([[i - 1, j - 1], [i - 1, j], [i - 1, j + 1], [i, j + 1], [i + 1, j +\n 1], [i + 1, j], [i + 1, j - 1], [i, j - 1]])\n', (3741, 3859), True, 'import numpy as np\n'), ((7338, 7387), 'numpy.zeros', 'np.zeros', (['(image_rows, image_cols)'], {'dtype': 'np.bool'}), '((image_rows, image_cols), dtype=np.bool)\n', (7346, 7387), True, 'import numpy as np\n'), ((7431, 7441), 'numpy.bool', 'np.bool', (['(1)'], {}), '(1)\n', (7438, 7441), True, 'import numpy as np\n'), ((7463, 7513), 'numpy.copy', 'np.copy', (['background[0:image_rows, 0:image_cols, :]'], {}), '(background[0:image_rows, 0:image_cols, :])\n', (7470, 7513), True, 'import numpy as np\n'), ((8058, 8080), 'numpy.asarray', 'np.asarray', (['background'], {}), '(background)\n', (8068, 8080), True, 'import numpy as np\n'), ((8177, 8198), 'numpy.shape', 'np.shape', (['image_array'], {}), '(image_array)\n', (8185, 8198), True, 'import numpy as np\n'), ((9476, 9531), 'PIL.Image.fromarray', 'Image.fromarray', (['segmented_image_with_background', '"""RGB"""'], {}), "(segmented_image_with_background, 'RGB')\n", (9491, 9531), False, 'from PIL import Image\n'), ((9544, 9587), 'matplotlib.pyplot.imshow', 'plt.imshow', (['segmented_image_with_background'], {}), '(segmented_image_with_background)\n', (9554, 9587), True, 'import matplotlib.pyplot as plt\n'), ((9600, 9610), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9608, 9610), True, 'import matplotlib.pyplot as plt\n'), ((4447, 4491), 'numpy.logical_and', 'np.logical_and', (['is_boundary_1', 'is_boundary_2'], {}), '(is_boundary_1, is_boundary_2)\n', (4461, 4491), True, 'import numpy as np\n'), ((4493, 4537), 'numpy.logical_and', 'np.logical_and', (['is_boundary_3', 'is_boundary_4'], {}), '(is_boundary_3, is_boundary_4)\n', (4507, 4537), True, 'import numpy as np\n'), ((4844, 4860), 'numpy.array', 'np.array', (['[i, j]'], {}), '([i, j])\n', (4852, 4860), True, 'import numpy as np\n'), ((5312, 5356), 'numpy.subtract', 'np.subtract', 
(['neighbour_pixels', 'current_pixel'], {}), '(neighbour_pixels, current_pixel)\n', (5323, 5356), True, 'import numpy as np\n'), ((5391, 5432), 'numpy.linalg.norm', 'np.linalg.norm', (['pixel_differences'], {'axis': '(1)'}), '(pixel_differences, axis=1)\n', (5405, 5432), True, 'import numpy as np\n'), ((5553, 5596), 'numpy.linalg.norm', 'np.linalg.norm', (['spatial_differences'], {'axis': '(1)'}), '(spatial_differences, axis=1)\n', (5567, 5596), True, 'import numpy as np\n'), ((7405, 7427), 'numpy.logical_not', 'np.logical_not', (['labels'], {}), '(labels)\n', (7419, 7427), True, 'import numpy as np\n'), ((2731, 2799), 'numpy.log', 'np.log', (['(hist_fg[pixel_bins[0], pixel_bins[1], pixel_bins[2]] + 1e-10)'], {}), '(hist_fg[pixel_bins[0], pixel_bins[1], pixel_bins[2]] + 1e-10)\n', (2737, 2799), True, 'import numpy as np\n'), ((2827, 2895), 'numpy.log', 'np.log', (['(hist_bg[pixel_bins[0], pixel_bins[1], pixel_bins[2]] + 1e-10)'], {}), '(hist_bg[pixel_bins[0], pixel_bins[1], pixel_bins[2]] + 1e-10)\n', (2833, 2895), True, 'import numpy as np\n'), ((2613, 2640), 'numpy.floor', 'np.floor', (['(pixel / hist_step)'], {}), '(pixel / hist_step)\n', (2621, 2640), True, 'import numpy as np\n'), ((5650, 5676), 'numpy.square', 'np.square', (['pixel_distances'], {}), '(pixel_distances)\n', (5659, 5676), True, 'import numpy as np\n'), ((5684, 5700), 'numpy.square', 'np.square', (['sigma'], {}), '(sigma)\n', (5693, 5700), True, 'import numpy as np\n')]
|
# superpixels.py Performs SLIC algorithm #
# Authors: <NAME>, <NAME>, <NAME>, <NAME>``
# import the necessary packages
from skimage.segmentation import slic
from skimage.segmentation import mark_boundaries
from skimage.util import img_as_float
from skimage.util import img_as_ubyte
from skimage import data, io, segmentation, color
from skimage.color import rgb2gray
from skimage.future import graph
import matplotlib.pyplot as plt
import argparse
import numpy as np
import cv2
import os
# Weighting Functions based on Color Intensities
def _weight_mean_color(graph, src, dst, n):
diff = graph.nodes[dst]['mean color'] - graph.nodes[n]['mean color']
diff = np.linalg.norm(diff)
return {'weight': diff}
def merge_mean_color(graph, src, dst):
    """Merge callback for RAG merging: fold node *src* into node *dst*.

    Accumulates the total color and pixel count of *src* into *dst*, then
    refreshes *dst*'s mean color from the new totals.
    """
    dst_node = graph.nodes[dst]
    src_node = graph.nodes[src]
    dst_node['total color'] += src_node['total color']
    dst_node['pixel count'] += src_node['pixel count']
    dst_node['mean color'] = dst_node['total color'] / dst_node['pixel count']
# Grayscale & Segments the Image as a Color
def segmentImage(sourcePath, destPath):
    """SLIC-oversegment the image at *sourcePath*, coarsen the result by
    hierarchically merging superpixels with similar mean color, and save
    the color-averaged segmentation to *destPath*.
    """
    gray = rgb2gray(io.imread(sourcePath))
    rgb_image = io.imread(sourcePath)
    # Stack the grayscale channel three times so SLIC sees a 3-channel image.
    gray_stack = img_as_float(np.dstack([gray, gray, gray]))
    # Oversegment aggressively; the RAG merge below coarsens the result.
    numSegments = 10000
    superpixels = slic(gray_stack, n_segments=numSegments, sigma=5)
    # Region adjacency graph on the color image, merged by mean-color
    # similarity (threshold 35) using the callbacks defined above.
    rag = graph.rag_mean_color(rgb_image, superpixels)
    merged_labels = graph.merge_hierarchical(superpixels, rag, thresh=35, rag_copy=False,
                                             in_place_merge=True,
                                             merge_func=merge_mean_color,
                                             weight_func=_weight_mean_color)
    averaged = color.label2rgb(merged_labels, rgb_image, kind='avg', bg_label=0)
    # Save the averaged segmentation.
    io.imsave(destPath, img_as_ubyte(averaged))
# Segments
def segmentFolder(source, dest):
    """Segment every picture found directly inside *source*.

    Each file whose name ends with .jpg/.png/.JPG/.PNG is processed by
    ``segmentImage``; the result keeps the same file name under *dest*.
    Both *source* and *dest* are expected to end with a path separator,
    since they are concatenated with the file name directly (original
    behavior, preserved).
    """
    picture_suffixes = ('.jpg', '.png', '.JPG', '.PNG')
    for file in os.listdir(source):
        # str.endswith accepts a tuple: one call instead of four or-ed ones,
        # and no redundant `== True` comparison.
        if file.endswith(picture_suffixes):
            segmentImage(source + file, dest + file)
|
[
"numpy.dstack",
"skimage.color.label2rgb",
"skimage.util.img_as_ubyte",
"skimage.future.graph.merge_hierarchical",
"numpy.linalg.norm",
"skimage.future.graph.rag_mean_color",
"skimage.segmentation.slic",
"skimage.util.img_as_float",
"os.listdir",
"skimage.io.imread"
] |
[((668, 688), 'numpy.linalg.norm', 'np.linalg.norm', (['diff'], {}), '(diff)\n', (682, 688), True, 'import numpy as np\n'), ((1187, 1208), 'skimage.io.imread', 'io.imread', (['sourcePath'], {}), '(sourcePath)\n', (1196, 1208), False, 'from skimage import data, io, segmentation, color\n'), ((1282, 1329), 'numpy.dstack', 'np.dstack', (['[image_gray, image_gray, image_gray]'], {}), '([image_gray, image_gray, image_gray])\n', (1291, 1329), True, 'import numpy as np\n'), ((1347, 1371), 'skimage.util.img_as_float', 'img_as_float', (['image_gray'], {}), '(image_gray)\n', (1359, 1371), False, 'from skimage.util import img_as_float\n'), ((1639, 1688), 'skimage.segmentation.slic', 'slic', (['image_gray'], {'n_segments': 'numSegments', 'sigma': '(5)'}), '(image_gray, n_segments=numSegments, sigma=5)\n', (1643, 1688), False, 'from skimage.segmentation import slic\n'), ((1701, 1738), 'skimage.future.graph.rag_mean_color', 'graph.rag_mean_color', (['image', 'segments'], {}), '(image, segments)\n', (1721, 1738), False, 'from skimage.future import graph\n'), ((1752, 1907), 'skimage.future.graph.merge_hierarchical', 'graph.merge_hierarchical', (['segments', 'g'], {'thresh': '(35)', 'rag_copy': '(False)', 'in_place_merge': '(True)', 'merge_func': 'merge_mean_color', 'weight_func': '_weight_mean_color'}), '(segments, g, thresh=35, rag_copy=False,\n in_place_merge=True, merge_func=merge_mean_color, weight_func=\n _weight_mean_color)\n', (1776, 1907), False, 'from skimage.future import graph\n'), ((2017, 2072), 'skimage.color.label2rgb', 'color.label2rgb', (['labels2', 'image'], {'kind': '"""avg"""', 'bg_label': '(0)'}), "(labels2, image, kind='avg', bg_label=0)\n", (2032, 2072), False, 'from skimage import data, io, segmentation, color\n'), ((2974, 2992), 'os.listdir', 'os.listdir', (['source'], {}), '(source)\n', (2984, 2992), False, 'import os\n'), ((1152, 1173), 'skimage.io.imread', 'io.imread', (['sourcePath'], {}), '(sourcePath)\n', (1161, 1173), False, 'from skimage import data, 
io, segmentation, color\n'), ((2387, 2404), 'skimage.util.img_as_ubyte', 'img_as_ubyte', (['out'], {}), '(out)\n', (2399, 2404), False, 'from skimage.util import img_as_ubyte\n')]
|
# -*- coding: utf-8 -*-
import asyncio
import inspect
import os
import signal as _signal
import sys
import traceback
from asyncio import (
AbstractEventLoop,
StreamReader,
StreamWriter,
)
from asyncio import Queue # noqa: F401
from contextlib import contextmanager
from itertools import count
from unittest import mock
from typing import (
Callable,
Iterable,
Iterator,
Optional,
TextIO,
Tuple,
Union,
)
from ._mempipe import mempipe
def call_with_minimal_args(f: Callable, **kwds):
    """Invoke *f* with only the keyword arguments its signature declares.

    Every named parameter of *f* must be present in *kwds*; extra entries
    are dropped and a ``**kwargs`` catch-all in the signature is ignored.
    This implements the dependency-injection convention used by
    ``mock_subprocess``.
    """
    params = inspect.signature(f).parameters
    accepted = {
        name: kwds[name]
        for name, param in params.items()
        if param.kind != inspect.Parameter.VAR_KEYWORD
    }
    return f(**accepted)
class OutputStreamAdapter(StreamWriter):
    """Adapt a text-mode file object to the ``StreamWriter`` interface.

    Bytes passed in are decoded as UTF-8 and forwarded to the wrapped
    stream; ``drain()`` simply flushes it.
    """

    def __init__(self, stream: TextIO) -> None:
        # Deliberately skip StreamWriter.__init__: we only mimic its API
        # and have no transport/protocol to hand it.
        self._stream = stream

    def write(self, data: bytes) -> None:
        self._stream.write(data.decode('utf-8'))

    def writelines(self, lines: Iterable[bytes]) -> None:
        # Each item becomes its own line in the underlying text stream.
        for chunk in lines:
            self._stream.write(chunk.decode('utf-8') + '\n')

    def can_write_eof(self) -> bool:
        return True

    def write_eof(self) -> None:
        # Closing the wrapped stream here could close the real sys.stdout
        # when no redirection is in place, so this is intentionally a no-op.
        pass

    # NOTE: the documented StreamWriter.drain signature is richer; this
    # minimal form is enough for the mock.
    async def drain(self):
        self._stream.flush()
class Process(object):
    """Mock for ``asyncio.subprocess.Process``.

    Runs an application-supplied ``run()`` coroutine on the event loop in
    place of a real child process, while emulating the subprocess API:
    standard stream redirection, signal delivery, ``wait()`` /
    ``communicate()`` and the exit code.
    """

    def __init__(self, *,
                 pid: int,
                 run: Callable,
                 argv=None,
                 env=None,
                 kwds=None,
                 stdin: Optional[int]=None,
                 stdout: Union[int, TextIO, None]=None,
                 stderr: Union[int, TextIO, None]=None,
                 limit: Optional[int]=None,
                 loop: Optional[AbstractEventLoop]=None) -> None:
        # ``argv``/``kwds`` used to be mutable defaults (``[]`` / ``{}``);
        # ``None`` sentinels avoid the shared-instance pitfall while keeping
        # the observable defaults identical.
        argv = [] if argv is None else argv
        kwds = {} if kwds is None else kwds
        self._loop = loop or asyncio.get_event_loop()
        self._pid = pid
        self._stdin = None  # type: Optional[StreamWriter]
        self._stdout = None  # type: Optional[StreamReader]
        self._stderr = None  # type: Optional[StreamReader]
        # Handle standard input redirection.
        r_stdin = None  # Optional[StreamReader]
        if stdin == asyncio.subprocess.PIPE:
            r_stdin, self._stdin = mempipe(limit=limit, loop=loop)
        else:
            # TODO: wrap `sys.stdin` in a `StreamReader`.
            r_stdin, self._stdin = None, None
        # Handle standard output redirection.
        if stdout == asyncio.subprocess.PIPE:
            self._stdout, w_stdout = mempipe(limit=limit, loop=loop)
        else:
            stdout = stdout or sys.stdout
            assert stdout is not None
            assert not isinstance(stdout, int)
            self._stdout, w_stdout = None, OutputStreamAdapter(stdout)
        # Handle standard error redirection.
        if stderr == asyncio.subprocess.PIPE:
            self._stderr, w_stderr = mempipe(limit=limit, loop=loop)
        else:
            stderr = stderr or sys.stderr
            assert stderr is not None
            assert not isinstance(stderr, int)
            self._stderr, w_stderr = None, OutputStreamAdapter(stderr)
        # Queue onto which ``send_signal()`` pushes signal numbers; the
        # simulated process consumes it through the ``signals`` argument.
        self._signals = asyncio.Queue()  # type: Queue
        # Start the application-defined process simulation.
        # FIX: ``asyncio.Event(loop=...)`` was deprecated in Python 3.8 and
        # removed in 3.10 (TypeError); the event binds to the running loop
        # on first use, so the keyword is unnecessary.
        self._done = asyncio.Event()
        self._task = self._loop.create_task(self._run_wrapper(
            run,
            stdin=r_stdin,
            stdout=w_stdout,
            stderr=w_stderr,
            signals=self._signals,
            env=env or {k: v for k, v in os.environ.items()},
            argv=argv,
            kwds=kwds,
        ))
        # Keep a reference to the streams, we'll need them later.
        self._w_stdout = w_stdout
        self._w_stderr = w_stderr
        # Process exit code is undefined until the simulation completes.
        self._returncode = None  # type: Optional[int]

    async def _run_wrapper(self, run: Callable,
                           *, stdout, stderr, **kwds) -> int:
        """Run the simulation; map cancellation to exit code 1 and always
        drain the output streams and signal completion."""
        try:
            return await call_with_minimal_args(
                run, stdout=stdout, stderr=stderr, **kwds
            )
        except asyncio.CancelledError:
            # ``terminate()`` cancels the task; report a non-zero exit code.
            return 1
        finally:
            await stdout.drain()
            await stderr.drain()
            self._done.set()

    @property
    def pid(self) -> int:
        """Fake process id assigned at creation."""
        return self._pid

    @property
    def stdin(self) -> Optional[StreamWriter]:
        """Writer feeding the simulated stdin (None unless PIPE)."""
        return self._stdin

    @property
    def stdout(self) -> Optional[StreamReader]:
        """Reader on the simulated stdout (None unless PIPE)."""
        return self._stdout

    @property
    def stderr(self) -> Optional[StreamReader]:
        """Reader on the simulated stderr (None unless PIPE)."""
        return self._stderr

    async def wait(self) -> int:
        """Wait for the simulation to finish and return its exit code.

        A ``None`` return from ``run()`` maps to 0; an uncaught exception
        maps to 1, with the traceback written to the (simulated) stderr.
        """
        await asyncio.wait({self._task})
        await self._done.wait()
        e = self._task.exception()
        if e is None:
            r = self._task.result()
            if r is None:
                r = 0
            self._returncode = r
        else:
            # Format traceback and send it to stderr (as if it had been
            # printed in the child process' output).
            self._w_stderr.writelines(
                line.encode('utf-8')
                for line in traceback.format_exception(
                    e.__class__, e, e.__traceback__
                )
            )
            self._returncode = 1
        assert self._w_stdout
        assert self._w_stderr
        self._w_stdout.write_eof()
        self._w_stderr.write_eof()
        return self._returncode

    async def communicate(self, input: bytes=b'') -> Tuple[Optional[bytes],
                                                           Optional[bytes]]:
        """Feed *input* to stdin, wait for exit, return (stdout, stderr)."""
        if self._stdin:
            self._stdin.write(input)
            self._stdin.write_eof()
        await self.wait()
        stdout = None
        if self._stdout:
            stdout = await self._stdout.read()
        stderr = None
        if self._stderr:
            stderr = await self._stderr.read()
        return stdout, stderr

    def send_signal(self, signal: int) -> None:
        """Deliver *signal* to the simulation via the signal queue."""
        self._signals.put_nowait(signal)

    def terminate(self) -> None:
        """Cancel the simulation task (the analogue of SIGTERM)."""
        self._task.cancel()

    def kill(self) -> None:
        """Forcefully stop the simulation."""
        if sys.platform == 'win32':
            self.terminate()
        else:
            # NOTE: for a real process, we'd send SIGKILL, which would then be
            #       passed as SIGINT to the application, but we don't have a
            #       kernel to make that substitution here.
            self.send_signal(_signal.SIGINT)

    @property
    def returncode(self) -> Optional[int]:
        """Exit code, or ``None`` until ``wait()`` has completed."""
        return self._returncode
@contextmanager
def mock_subprocess(run: Callable,
                    loop: Optional[AbstractEventLoop]=None) -> Iterator[None]:
    """Calls ``run()`` instead of spawning a sub-process.

    :param run: A coroutine function that simulates the sub-process. Can
     return ``None`` or ``0`` to simulate successful process execution or a
     non-zero error code to simulate sub-process terminate with a non-zero exit
     code. If an exception is raised, the result is 1 (non-zero). This
     function can accept a variable number of arguments, see below.
    :param loop: Event loop to use for futures created by the patched
     ``asyncio.create_subprocess_exec()`` when the caller does not supply
     one explicitly.

    Dependency injection is used with the ``run()`` coroutine function to pass
    only arguments that are declared in the function's signature. Omit all but
    the arguments you intend to use. Here are all the available arguments:

    - ``argv``: a list of strings passed as positional arguments to
      ``asyncio.create_subprocess_exec()``.
    - ``stdin``: an ``asyncio.StreamReader`` instance. When output is not
      redirected, this reads from the "real" ``sys.stdin``.
    - ``stdout``: an ``asyncio.StreamWriter`` instance. When output is not
      redirected, this writes to the "real" ``sys.stdout``.
    - ``stderr``: an ``asyncio.StreamWriter`` instance. When output is not
      redirected, this writes to the "real" ``sys.stderr``.
    - ``env``: a ``dict`` containing environment variables passed to
      ``asyncio.create_subprocess_exec()``.
    - ``signals``: an ``asyncio.Queue`` object that receives integers passed to
      ``asyncio.Process.send_signal()``.
    - ``kwds``: extra keyword arguments passed to
      ``asyncio.create_subprocess_exec()``.

    .. versionadded:: 0.1

    """
    # FIX: the caller-supplied loop used to be read into a local that was
    # never used, so it was silently ignored; it now serves as the default
    # loop for the patched function below.
    default_loop = loop
    pid = count(start=1)

    def create_subprocess_exec(*args, stdin=None, stdout=None, env=None,
                               stderr=None, loop=None, limit=None, **kwds):
        """Mock for ``asyncio.create_subprocess_exec()``."""
        loop = loop or default_loop or asyncio.get_event_loop()
        # FIX: create the future on the loop it will be resolved on; a bare
        # ``asyncio.Future()`` binds to ``get_event_loop()``, which may be a
        # different loop than the one ``call_soon`` is scheduled on.
        f = loop.create_future()
        process = Process(
            pid=next(pid),
            run=run,
            loop=loop,
            argv=list(args),
            stdin=stdin,
            stdout=stdout,
            stderr=stderr,
            env=env,
            limit=limit,
            kwds=kwds,
        )
        loop.call_soon(f.set_result, process)
        return f

    with mock.patch('asyncio.create_subprocess_exec', create_subprocess_exec):
        yield None
|
[
"traceback.format_exception",
"asyncio.get_event_loop",
"asyncio.Event",
"os.environ.items",
"itertools.count",
"unittest.mock.patch",
"inspect.signature",
"asyncio.wait",
"asyncio.Queue",
"asyncio.Future"
] |
[((541, 561), 'inspect.signature', 'inspect.signature', (['f'], {}), '(f)\n', (558, 561), False, 'import inspect\n'), ((8543, 8557), 'itertools.count', 'count', ([], {'start': '(1)'}), '(start=1)\n', (8548, 8557), False, 'from itertools import count\n'), ((3396, 3411), 'asyncio.Queue', 'asyncio.Queue', ([], {}), '()\n', (3409, 3411), False, 'import asyncio\n'), ((3509, 3533), 'asyncio.Event', 'asyncio.Event', ([], {'loop': 'loop'}), '(loop=loop)\n', (3522, 3533), False, 'import asyncio\n'), ((8507, 8531), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (8529, 8531), False, 'import asyncio\n'), ((8829, 8845), 'asyncio.Future', 'asyncio.Future', ([], {}), '()\n', (8843, 8845), False, 'import asyncio\n'), ((9204, 9272), 'unittest.mock.patch', 'mock.patch', (['"""asyncio.create_subprocess_exec"""', 'create_subprocess_exec'], {}), "('asyncio.create_subprocess_exec', create_subprocess_exec)\n", (9214, 9272), False, 'from unittest import mock\n'), ((2039, 2063), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2061, 2063), False, 'import asyncio\n'), ((4919, 4945), 'asyncio.wait', 'asyncio.wait', (['{self._task}'], {}), '({self._task})\n', (4931, 4945), False, 'import asyncio\n'), ((8792, 8816), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (8814, 8816), False, 'import asyncio\n'), ((5395, 5454), 'traceback.format_exception', 'traceback.format_exception', (['e.__class__', 'e', 'e.__traceback__'], {}), '(e.__class__, e, e.__traceback__)\n', (5421, 5454), False, 'import traceback\n'), ((3775, 3793), 'os.environ.items', 'os.environ.items', ([], {}), '()\n', (3791, 3793), False, 'import os\n')]
|
"""
Tests - Deuce Client - API - Project
"""
from unittest import TestCase
import deuceclient.api as api
import deuceclient.common.errors as errors
import deuceclient.common.validation as val
from deuceclient.tests import *
class ProjectTest(TestCase):
    """Unit tests for ``deuceclient.api.Project``."""

    def setUp(self):
        super().setUp()
        self.project_id = create_project_name()
        self.vault_id = create_vault_name()

    def test_create_project(self):
        instance = api.Project(self.project_id)
        self.assertEqual(instance.project_id, self.project_id)

    def test_create_project_bad_type(self):
        # NOTE(review): ``bytes(str)`` without an encoding itself raises
        # TypeError, so Project may never actually see a bytes argument
        # here -- confirm the intent before changing this call.
        with self.assertRaises(TypeError):
            api.Project(bytes(self.project_id))

    def test_project_name_invalid(self):
        # A '$' is not a legal project-id character.
        with self.assertRaises(errors.InvalidProject):
            api.Project(self.project_id + '$')
        # Grow the id until it exceeds the maximum allowed length.
        oversized = self.project_id
        while len(oversized) < (val.PROJECT_ID_MAX_LEN + 1):
            oversized = '{0}_{1}'.format(oversized, self.project_id)
        with self.assertRaises(errors.InvalidProject):
            api.Project(oversized)

    def test_set_marker(self):
        instance = api.Project(self.project_id)
        self.assertIsNone(instance.marker)
        instance.marker = self.vault_id
        self.assertIsNotNone(instance.marker)
        self.assertEqual(instance.marker, self.vault_id)
        instance.marker = None
        self.assertIsNone(instance.marker)

    def test_project_add_vault(self):
        instance = api.Project(self.project_id)
        vault = api.Vault(self.project_id, self.vault_id)
        instance[vault.vault_id] = vault
        self.assertEqual(vault, instance[vault.vault_id])

    def test_project_add_vault_invalid(self):
        instance = api.Project(self.project_id)
        with self.assertRaises(errors.InvalidVault):
            instance[self.vault_id + '$'] = {}

    def test_project_get_vault_invalid(self):
        instance = api.Project(self.project_id)
        with self.assertRaises(errors.InvalidVault):
            instance[self.vault_id + '$']

    def test_project_update_vault(self):
        instance = api.Project(self.project_id)
        vaults = {
            name: api.Vault(self.project_id, name)
            for name in [create_vault_name()]
        }
        instance.update(vaults)
        for vault_id, vault in vaults.items():
            self.assertEqual(vault, instance[vault_id])

    def test_project_update_vault_invalid(self):
        instance = api.Project(self.project_id)
        # Values must be Vault instances, not plain strings.
        bad_vaults = {name: name for name in [create_vault_name()]}
        with self.assertRaises(TypeError):
            instance.update(bad_vaults)

    def test_repr(self):
        instance = api.Project(self.project_id)
        self.assertIsInstance(repr(instance), str)
|
[
"deuceclient.api.Project",
"deuceclient.api.Vault"
] |
[((467, 495), 'deuceclient.api.Project', 'api.Project', (['self.project_id'], {}), '(self.project_id)\n', (478, 495), True, 'import deuceclient.api as api\n'), ((1184, 1212), 'deuceclient.api.Project', 'api.Project', (['self.project_id'], {}), '(self.project_id)\n', (1195, 1212), True, 'import deuceclient.api as api\n'), ((1552, 1580), 'deuceclient.api.Project', 'api.Project', (['self.project_id'], {}), '(self.project_id)\n', (1563, 1580), True, 'import deuceclient.api as api\n'), ((1598, 1639), 'deuceclient.api.Vault', 'api.Vault', (['self.project_id', 'self.vault_id'], {}), '(self.project_id, self.vault_id)\n', (1607, 1639), True, 'import deuceclient.api as api\n'), ((1830, 1858), 'deuceclient.api.Project', 'api.Project', (['self.project_id'], {}), '(self.project_id)\n', (1841, 1858), True, 'import deuceclient.api as api\n'), ((2024, 2052), 'deuceclient.api.Project', 'api.Project', (['self.project_id'], {}), '(self.project_id)\n', (2035, 2052), True, 'import deuceclient.api as api\n'), ((2213, 2241), 'deuceclient.api.Project', 'api.Project', (['self.project_id'], {}), '(self.project_id)\n', (2224, 2241), True, 'import deuceclient.api as api\n'), ((2531, 2559), 'deuceclient.api.Project', 'api.Project', (['self.project_id'], {}), '(self.project_id)\n', (2542, 2559), True, 'import deuceclient.api as api\n'), ((2760, 2788), 'deuceclient.api.Project', 'api.Project', (['self.project_id'], {}), '(self.project_id)\n', (2771, 2788), True, 'import deuceclient.api as api\n'), ((825, 859), 'deuceclient.api.Project', 'api.Project', (["(self.project_id + '$')"], {}), "(self.project_id + '$')\n", (836, 859), True, 'import deuceclient.api as api\n'), ((1119, 1133), 'deuceclient.api.Project', 'api.Project', (['x'], {}), '(x)\n', (1130, 1133), True, 'import deuceclient.api as api\n'), ((2276, 2305), 'deuceclient.api.Vault', 'api.Vault', (['self.project_id', 'x'], {}), '(self.project_id, x)\n', (2285, 2305), True, 'import deuceclient.api as api\n')]
|
"""Implementation of the longest increasing subsequence algorithm."""
import operator
from bisect import bisect_right, bisect_left
from typing import TypeVar, Optional, List, Any, Iterator, Sequence, Callable
T = TypeVar('T')
def longest_increasing_subsequence(seq: Sequence[T], strict=False, key: Callable = None) -> List[T]:
"""
Returns the longest increasing subsequence of the given sequence.
There may be other increasing subsequences of the same length.
>>> longest_increasing_subsequence([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15])
[0, 2, 6, 9, 11, 15]
>>> longest_increasing_subsequence([0, 0, 1, 2, 3, 2, 1, 0, 0])
[0, 0, 1, 2, 2]
>>> longest_increasing_subsequence([0, 0, 1, 2, 3], strict=True)
[0, 1, 2, 3]
>>> longest_increasing_subsequence(['A', 'B', 'CC', 'D', 'EEE'], key=len)
['A', 'B', 'D', 'EEE']
>>> "".join(longest_increasing_subsequence('aababbbdccddd'))
'aaabbbccddd'
:param seq: A sequence-like container of comparable objects.
:param strict: Whether the subsequence must be strictly increasing.
:param key: If not None, values in sequence are compared by comparing their keys.
:return: The longest increasing subsequence in seq as a list.
"""
return _longest_monotone_subsequence(seq, True, strict, key)
def longest_decreasing_subsequence(seq: Sequence[T], strict=False, key: Callable = None) -> List[T]:
"""
Returns the longest decreasing subsequence of the given sequence.
There may be other decreasing subsequences of the same length.
>>> longest_decreasing_subsequence([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15])
[12, 10, 9, 5, 3]
>>> longest_decreasing_subsequence([0, 0, 1, 2, 3, 2, 1, 0, 0])
[3, 2, 1, 0, 0]
>>> longest_decreasing_subsequence([0, 0, 1, 2, 3, 2, 1, 0, 0], strict=True)
[3, 2, 1, 0]
:param seq: A sequence-like container of comparable objects.
:param strict: Whether the subsequence must be strictly decreasing.
:param key: If not None, values in sequence are compared by comparing their keys.
:return: The longest decreasing subsequence in seq as a list.
"""
try:
return _longest_monotone_subsequence(seq, False, strict, key, True)
except TypeError:
pass
return _longest_monotone_subsequence(seq, False, strict, key, False)
def longest_increasing_subsequence_indices(seq: Sequence[T], strict=False, key: Callable = None) -> List[int]:
"""
Returns the indices of the longest increasing subsequence of the given sequence.
There may be other increasing subsequences of the same length.
>>> longest_increasing_subsequence_indices([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15])
[0, 4, 6, 9, 13, 15]
>>> longest_increasing_subsequence_indices([0, 0, 1, 2, 3, 2, 1, 0, 0])
[0, 1, 2, 3, 5]
>>> longest_increasing_subsequence_indices([0, 0, 1, 2, 3, 2, 1, 0, 0], strict=True)
[0, 2, 3, 4]
:param seq: A sequence-like container of comparable objects.
:param strict: Whether the subsequence must be strictly increasing.
:param key: If not None, values in sequence are compared by comparing their keys.
:return: A list of indices of the longest increasing subsequence in seq.
"""
return _longest_monotone_subsequence_indices(seq, True, strict, key)
def longest_decreasing_subsequence_indices(seq: Sequence[T], strict=False, key: Callable = None) -> List[int]:
"""
Returns the indices of the longest decreasing subsequence of the given sequence.
There may be other decreasing subsequences of the same length.
>>> longest_decreasing_subsequence_indices([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15])
[3, 5, 9, 10, 12]
>>> longest_decreasing_subsequence_indices([0, 0, 1, 2, 3, 2, 1, 0, 0])
[4, 5, 6, 7, 8]
>>> longest_decreasing_subsequence_indices([0, 0, 1, 2, 3, 2, 1, 0, 0], strict=True)
[4, 5, 6, 7]
:param seq: A sequence-like container of comparable objects.
:param strict: Whether the subsequence must be strictly decreasing.
:param key: If not None, values in sequence are compared by comparing their keys.
:return: A list of indices of the longest decreasing subsequence in seq.
"""
try:
return _longest_monotone_subsequence_indices(seq, False, strict, key, True)
except TypeError:
pass
return _longest_monotone_subsequence_indices(seq, False, strict, key, False)
def _longest_monotone_subsequence(seq: Sequence[T], increasing=True, strict=False, key: Callable = None, assume_negatable=True) -> List[T]:
"""
Returns the a list of the longest increasing (respectively decreasing) subsequence of the given sequence.
There may be other increasing (respectively decreasing) subsequences of the same length.
This is not a public function, use a longest_increasing_* or longest_decreasing_* function instead.
:param seq: A sequence-like container of comparable objects.
:param increasing: Whether the subsequence should be increasing or decreasing.
:param strict: Whether the subsequence must be strictly monotone.
:param key: If not None, values in sequence are compared by comparing their keys.
:param assume_negatable: If True (the default), assume that negation (unary -) is defined and is order-reversing on objects or keys.
For non-negatable types, set this option to False.
:return: An iterator of indices of the longest monotone subsequence in seq.
"""
return [seq[idx] for idx in _longest_monotone_subsequence_indices_iter(seq, increasing, strict, key, assume_negatable)]
def _longest_monotone_subsequence_indices(seq: Sequence[T], increasing=True, strict=False, key: Callable = None, assume_negatable=True) -> List[int]:
"""
Gives a list of the indices of the longest increasing (respectively decreasing) subsequence of the given sequence.
There may be other increasing (respectively decreasing) subsequences of the same length.
This is not a public function, use a longest_increasing_* or longest_decreasing_* function instead.
:param seq: A sequence-like container of comparable objects.
:param increasing: Whether the subsequence should be increasing or decreasing.
:param strict: Whether the subsequence must be strictly monotone.
:param key: If not None, values in sequence are compared by comparing their keys.
:param assume_negatable: If True (the default), assume that negation (unary -) is defined and is order-reversing on objects or keys.
For non-negatable types, set this option to False.
:return: An iterator of indices of the longest monotone subsequence in seq.
"""
return list(_longest_monotone_subsequence_indices_iter(seq, increasing, strict, key, assume_negatable))
def _longest_monotone_subsequence_indices_iter(seq: Sequence[T], increasing=True, strict=False, key: Callable = None, assume_negatable=True) -> Iterator[int]:
"""
Yields the indices of the longest increasing (respectively decreasing) subsequence of the given sequence.
There may be other monotone subsequences of the same length.
This is not a public function, use a longest_increasing_* or longest_decreasing_* function instead.
:param seq: A sequence-like container of comparable objects.
:param increasing: Whether the subsequence should be increasing or decreasing.
:param strict: Whether the subsequence must be strictly monotone.
:param key: If not None, values in sequence are compared by comparing their keys.
:param assume_negatable: If True (the default), assume that negation (unary -) is defined and is order-reversing on objects or keys.
For non-negatable types, set this option to False.
:return: An iterator of indices of the longest monotone subsequence in seq.
"""
if not seq:
return (_ for _ in [])
idx_prev_longest: List[Optional[int]] = []
idx_min_of_len_plus1: List[int] = [] # the index of the smallest value ending a subsequence of a given length+1
val_min_of_len_plus1: List[Any] = [] # the smallest value ending a subsequence of a given length+1
bisect = bisect_right if not strict else bisect_left
key_fn = _choose_key_function(key, increasing, assume_negatable)
keys = seq if key_fn is None else map(key_fn, seq)
for i, curr_key in enumerate(keys):
len_longest_extendable = bisect(val_min_of_len_plus1, curr_key)
if len_longest_extendable == len(val_min_of_len_plus1):
idx_min_of_len_plus1.append(i)
val_min_of_len_plus1.append(curr_key)
elif curr_key < val_min_of_len_plus1[len_longest_extendable]:
idx_min_of_len_plus1[len_longest_extendable] = i
val_min_of_len_plus1[len_longest_extendable] = curr_key
idx_longest_extendable = idx_min_of_len_plus1[len_longest_extendable - 1] if len_longest_extendable else None
idx_prev_longest.append(idx_longest_extendable)
longest_subsequence_indices = _make_subsequence_indices(prev_indices=idx_prev_longest,
terminal_idx=idx_min_of_len_plus1[-1])
return longest_subsequence_indices
class _OrderReversed:
"""
A wrapper around any object that swaps its < and > operators (without touching the actual object).
>>> _OrderReversed(0) > _OrderReversed(1)
True
>>> repr(_OrderReversed(0))
'_OrderReversed(0)'
"""
__slots__ = ('obj',)
def __init__(self, o):
self.obj = o
def __lt__(self, other):
return self.obj > other.obj
def __gt__(self, other):
return self.obj < other.obj
def __repr__(self):
return f'{self.__class__.__name__}({self.obj!r})'
def _choose_key_function(key: Optional[Callable], increasing: bool, assume_negatable: bool) -> Optional[Callable]:
"""
Gives back the key function with its order optionally reversed. None represents the identity function.
>>> _choose_key_function(None, True, True) is None
True
>>> _choose_key_function(None, True, False) is None
True
>>> fn = _choose_key_function(None, False, True)
>>> fn(0) > fn(1)
True
>>> fn = _choose_key_function(None, False, False)
>>> fn(0) > fn(1)
True
>>> fn = _choose_key_function(len, True, False)
>>> fn("X") < fn("AA")
True
>>> fn = _choose_key_function(len, True, True)
>>> fn("X") < fn("AA")
True
>>> fn = _choose_key_function(len, False, False)
>>> fn("AA") < fn("X")
True
"""
if key is None:
if increasing:
key_fn = None
elif assume_negatable:
key_fn = operator.neg
else:
def key_fn(v):
return _OrderReversed(v)
else:
orig_key = key
if increasing:
key_fn = orig_key
elif assume_negatable:
def key_fn(v):
return -orig_key(v)
else:
def key_fn(v):
return _OrderReversed(orig_key(v))
return key_fn
def _make_reversed_subsequence_indices(prev_indices: List[Optional[int]], terminal_idx: int) -> Iterator[int]:
"""
Given a list of indices representing pointers to parent, and given a terminal pointer, yields indices from the terminal to the root.
>>> list(_make_reversed_subsequence_indices([None, 0, 0, 1, 2, 1], 5))
[5, 1, 0]
"""
idx: Optional[int] = terminal_idx
while idx is not None:
yield idx
idx = prev_indices[idx]
def _make_subsequence_indices(prev_indices: List[Optional[int]], terminal_idx: int) -> Iterator[int]:
"""
Given a list of indices representing pointers to parent, and given a terminal pointer, yields indices from the root to the terminal index.
>>> list(_make_subsequence_indices([None, 0, 0, 1, 2, 1], 5))
[0, 1, 5]
"""
return reversed(list(_make_reversed_subsequence_indices(prev_indices, terminal_idx)))
|
[
"typing.TypeVar"
] |
[((214, 226), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (221, 226), False, 'from typing import TypeVar, Optional, List, Any, Iterator, Sequence, Callable\n')]
|
from cmath import sqrt
from math import hypot
from utils import near_equal
class Vector2D:
def __init__(self, x, y):
self.x = x
self.y = y
def normalized(self):
modulus = sqrt(self.x * self.x + self.y * self.y)
return Vector2D(self.x / modulus, self.y / modulus)
def dot(self, point):
return self.x * point.x + self.y * point.y
def dif(self, point):
return Vector2D(self.x - point.x, self.y - point.y)
def inverse(self):
x = 0 if self.x == 0 else 1 / self.x
y = 0 if self.y == 0 else 1 / self.y
return Vector2D(x, y) * (1 / Vector2D(x, y).dot(self))
def close(self, point):
return near_equal(self.x, point.x) and near_equal(self.y, point.y)
def __repr__(self, *args, **kwargs):
return '(' + str(self.x) + ', ' + str(self.y) + ')'
def __str__(self):
return '(' + str(self.x) + ', ' + str(self.y) + ')'
def __eq__(self, other):
return self.close(other)
def __add__(self, other):
return type(self)(self.x + other.x, self.y + other.y)
def __sub__(self, other):
return type(self)(self.x - other.x, self.y - other.y)
def __mul__(self, other):
return type(self)(self.x * other, self.y * other)
def distance_to(self, other):
return hypot((self.x - other.x), (self.y - other.y))
def __hash__(self, *args, **kwargs):
return hash(str(self))
import unittest
class Vector2DTests(unittest.TestCase):
def setUp(self):
self.v1 = Vector2D(1, 1)
self.v2 = Vector2D(2, 2)
self.v3 = Vector2D(3, 0)
def test_normalized(self):
self.assertEqual(self.v3.normalized(), Vector2D(1, 0))
def test_dot(self):
self.assertEqual(self.v1.dot(self.v2), 4)
def test_dif(self):
self.assertEqual(self.v1 - self.v2, Vector2D(-1, -1))
def test_inverse(self):
self.assertEqual(self.v3.inverse(), Vector2D(1 / 3, 0))
def test_mul(self):
self.assertEqual(self.v1 * 2, Vector2D(2, 2))
def test_sum(self):
self.assertEqual(self.v1 + self.v2, Vector2D(3, 3))
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"math.hypot",
"cmath.sqrt",
"utils.near_equal"
] |
[((2172, 2187), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2185, 2187), False, 'import unittest\n'), ((206, 245), 'cmath.sqrt', 'sqrt', (['(self.x * self.x + self.y * self.y)'], {}), '(self.x * self.x + self.y * self.y)\n', (210, 245), False, 'from cmath import sqrt\n'), ((1326, 1367), 'math.hypot', 'hypot', (['(self.x - other.x)', '(self.y - other.y)'], {}), '(self.x - other.x, self.y - other.y)\n', (1331, 1367), False, 'from math import hypot\n'), ((692, 719), 'utils.near_equal', 'near_equal', (['self.x', 'point.x'], {}), '(self.x, point.x)\n', (702, 719), False, 'from utils import near_equal\n'), ((724, 751), 'utils.near_equal', 'near_equal', (['self.y', 'point.y'], {}), '(self.y, point.y)\n', (734, 751), False, 'from utils import near_equal\n')]
|
#External libs
import boto3
import sys
import json
import os
from botocore.exceptions import ClientError
#Establish our boto resources
client = boto3.client('lambda')
#This is only here for printing pretty colors
class color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
def check(lambda_name):
'''
Checks to see if the lambda exists already, if it does, the user will be prompted to use 'update-code' action
args:
lambda_name: name of the lambda, retrieved from config file
'''
try:
function = client.get_function(FunctionName=lambda_name)
if len(function) > 0:
return True
else:
return False
except ClientError as error:
print(error.response)
def check_alias(lambda_name, alias):
'''
Checks our lambda to ensure the alias we want to import from exits
args:
lambda_name: name of the lambda we're checking
alias: name of the alias we're checking
'''
try:
alias = client.get_alias(FunctionName=lambda_name, Name=alias)
except ClientError as error:
print(error.response['Error']['Message'])
sys.exit(1)
else:
print("Alias located successfully!")
finally:
return True
|
[
"sys.exit",
"boto3.client"
] |
[((145, 167), 'boto3.client', 'boto3.client', (['"""lambda"""'], {}), "('lambda')\n", (157, 167), False, 'import boto3\n'), ((1337, 1348), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1345, 1348), False, 'import sys\n')]
|
import os
import glob
from preprocess import clahe_gridsize
import cv2
train_ratio = 0.7
eval_ratio = 0.3
test_ratio = 0.
def get_images(image_dir, preprocess=False, phase='train', healthy_included=True):
if preprocess:
limit = 2
grid_size = 8
if not os.path.exists(os.path.join(image_dir, 'ApparentRetinopathy_CLAHE')):
os.mkdir(os.path.join(image_dir, 'ApparentRetinopathy_CLAHE'))
os.mkdir(os.path.join(image_dir, 'NoApparentRetinopathy_CLAHE'))
apparent_ori = glob.glob(os.path.join(image_dir, 'ApparentRetinopathy/*.jpg'))
noapparent_ori = glob.glob(os.path.join(image_dir, 'NoApparentRetinopathy/*.jpg'))
apparent_ori.sort()
noapparent_ori.sort()
# mean brightness.
meanbright = 0.
for img_path in apparent_ori + noapparent_ori:
img_name = os.path.split(img_path)[-1].split('.')[0]
mask_path = os.path.join(image_dir, 'GroundTruth', 'MASK', img_name+'_MASK.tif')
gray = cv2.imread(img_path, 0)
mask_img = cv2.imread(mask_path, 0)
brightness = gray.sum() / (mask_img.shape[0] * mask_img.shape[1] - mask_img.sum() / 255.)
meanbright += brightness
meanbright /= len(apparent_ori + noapparent_ori)
# preprocess for apparent.
for img_path in apparent_ori:
img_name = os.path.split(img_path)[-1].split('.')[0]
mask_path = os.path.join(image_dir, 'GroundTruth', 'MASK', img_name+'_MASK.tif')
clahe_img = clahe_gridsize(img_path, mask_path, denoise=True, verbose=False, brightnessbalance=meanbright, cliplimit=limit, gridsize=grid_size)
cv2.imwrite(os.path.join(image_dir, 'ApparentRetinopathy_CLAHE', os.path.split(img_path)[-1]), clahe_img)
# preprocess for noapparent.
for img_path in noapparent_ori:
img_name = os.path.split(img_path)[-1].split('.')[0]
mask_path = os.path.join(image_dir, 'GroundTruth', 'MASK', img_name+'_MASK.tif')
clahe_img = clahe_gridsize(img_path, mask_path, denoise=True, verbose=False, brightnessbalance=meanbright, cliplimit=limit, gridsize=grid_size)
cv2.imwrite(os.path.join(image_dir, 'NoApparentRetinopathy_CLAHE', os.path.split(img_path)[-1]), clahe_img)
apparent = glob.glob(os.path.join(image_dir, 'ApparentRetinopathy_CLAHE/*.jpg'))
noapparent = glob.glob(os.path.join(image_dir, 'NoApparentRetinopathy_CLAHE/*.jpg'))
else:
apparent = glob.glob(os.path.join(image_dir, 'ApparentRetinopathy/*.jpg'))
noapparent = glob.glob(os.path.join(image_dir, 'NoApparentRetinopathy/*.jpg'))
apparent.sort()
noapparent.sort()
image_paths = []
mask_paths = []
if healthy_included:
imgset = [apparent, noapparent]
else:
imgset = [apparent]
for each in imgset:
train_number = int(len(each) * train_ratio)
eval_number = int(len(each) * eval_ratio)
if phase == 'train':
image_paths.extend(each[:train_number])
elif phase == 'eval':
image_paths.extend(each[train_number:train_number+eval_number])
else:
image_paths.extend(each[train_number+eval_number:])
mask_path= os.path.join(image_dir, 'GroundTruth')
lesions = ['EX', 'HE', 'MA', 'SE', 'MASK']
for image_path in image_paths:
paths = []
name = os.path.split(image_path)[1].split('.')[0]
for lesion in lesions:
candidate_path = os.path.join(mask_path, lesion, name+'_'+lesion+'.tif')
if os.path.exists(candidate_path):
paths.append(candidate_path)
else:
paths.append(None)
mask_paths.append(paths)
return image_paths, mask_paths
|
[
"os.path.exists",
"preprocess.clahe_gridsize",
"cv2.imread",
"os.path.split",
"os.path.join"
] |
[((3409, 3447), 'os.path.join', 'os.path.join', (['image_dir', '"""GroundTruth"""'], {}), "(image_dir, 'GroundTruth')\n", (3421, 3447), False, 'import os\n'), ((2478, 2536), 'os.path.join', 'os.path.join', (['image_dir', '"""ApparentRetinopathy_CLAHE/*.jpg"""'], {}), "(image_dir, 'ApparentRetinopathy_CLAHE/*.jpg')\n", (2490, 2536), False, 'import os\n'), ((2569, 2629), 'os.path.join', 'os.path.join', (['image_dir', '"""NoApparentRetinopathy_CLAHE/*.jpg"""'], {}), "(image_dir, 'NoApparentRetinopathy_CLAHE/*.jpg')\n", (2581, 2629), False, 'import os\n'), ((2670, 2722), 'os.path.join', 'os.path.join', (['image_dir', '"""ApparentRetinopathy/*.jpg"""'], {}), "(image_dir, 'ApparentRetinopathy/*.jpg')\n", (2682, 2722), False, 'import os\n'), ((2755, 2809), 'os.path.join', 'os.path.join', (['image_dir', '"""NoApparentRetinopathy/*.jpg"""'], {}), "(image_dir, 'NoApparentRetinopathy/*.jpg')\n", (2767, 2809), False, 'import os\n'), ((3667, 3728), 'os.path.join', 'os.path.join', (['mask_path', 'lesion', "(name + '_' + lesion + '.tif')"], {}), "(mask_path, lesion, name + '_' + lesion + '.tif')\n", (3679, 3728), False, 'import os\n'), ((3738, 3768), 'os.path.exists', 'os.path.exists', (['candidate_path'], {}), '(candidate_path)\n', (3752, 3768), False, 'import os\n'), ((296, 348), 'os.path.join', 'os.path.join', (['image_dir', '"""ApparentRetinopathy_CLAHE"""'], {}), "(image_dir, 'ApparentRetinopathy_CLAHE')\n", (308, 348), False, 'import os\n'), ((372, 424), 'os.path.join', 'os.path.join', (['image_dir', '"""ApparentRetinopathy_CLAHE"""'], {}), "(image_dir, 'ApparentRetinopathy_CLAHE')\n", (384, 424), False, 'import os\n'), ((447, 501), 'os.path.join', 'os.path.join', (['image_dir', '"""NoApparentRetinopathy_CLAHE"""'], {}), "(image_dir, 'NoApparentRetinopathy_CLAHE')\n", (459, 501), False, 'import os\n'), ((540, 592), 'os.path.join', 'os.path.join', (['image_dir', '"""ApparentRetinopathy/*.jpg"""'], {}), "(image_dir, 'ApparentRetinopathy/*.jpg')\n", (552, 592), False, 'import 
os\n'), ((633, 687), 'os.path.join', 'os.path.join', (['image_dir', '"""NoApparentRetinopathy/*.jpg"""'], {}), "(image_dir, 'NoApparentRetinopathy/*.jpg')\n", (645, 687), False, 'import os\n'), ((983, 1053), 'os.path.join', 'os.path.join', (['image_dir', '"""GroundTruth"""', '"""MASK"""', "(img_name + '_MASK.tif')"], {}), "(image_dir, 'GroundTruth', 'MASK', img_name + '_MASK.tif')\n", (995, 1053), False, 'import os\n'), ((1075, 1098), 'cv2.imread', 'cv2.imread', (['img_path', '(0)'], {}), '(img_path, 0)\n', (1085, 1098), False, 'import cv2\n'), ((1126, 1150), 'cv2.imread', 'cv2.imread', (['mask_path', '(0)'], {}), '(mask_path, 0)\n', (1136, 1150), False, 'import cv2\n'), ((1550, 1620), 'os.path.join', 'os.path.join', (['image_dir', '"""GroundTruth"""', '"""MASK"""', "(img_name + '_MASK.tif')"], {}), "(image_dir, 'GroundTruth', 'MASK', img_name + '_MASK.tif')\n", (1562, 1620), False, 'import os\n'), ((1647, 1782), 'preprocess.clahe_gridsize', 'clahe_gridsize', (['img_path', 'mask_path'], {'denoise': '(True)', 'verbose': '(False)', 'brightnessbalance': 'meanbright', 'cliplimit': 'limit', 'gridsize': 'grid_size'}), '(img_path, mask_path, denoise=True, verbose=False,\n brightnessbalance=meanbright, cliplimit=limit, gridsize=grid_size)\n', (1661, 1782), False, 'from preprocess import clahe_gridsize\n'), ((2096, 2166), 'os.path.join', 'os.path.join', (['image_dir', '"""GroundTruth"""', '"""MASK"""', "(img_name + '_MASK.tif')"], {}), "(image_dir, 'GroundTruth', 'MASK', img_name + '_MASK.tif')\n", (2108, 2166), False, 'import os\n'), ((2193, 2328), 'preprocess.clahe_gridsize', 'clahe_gridsize', (['img_path', 'mask_path'], {'denoise': '(True)', 'verbose': '(False)', 'brightnessbalance': 'meanbright', 'cliplimit': 'limit', 'gridsize': 'grid_size'}), '(img_path, mask_path, denoise=True, verbose=False,\n brightnessbalance=meanbright, cliplimit=limit, gridsize=grid_size)\n', (2207, 2328), False, 'from preprocess import clahe_gridsize\n'), ((3564, 3589), 'os.path.split', 
'os.path.split', (['image_path'], {}), '(image_path)\n', (3577, 3589), False, 'import os\n'), ((1860, 1883), 'os.path.split', 'os.path.split', (['img_path'], {}), '(img_path)\n', (1873, 1883), False, 'import os\n'), ((2408, 2431), 'os.path.split', 'os.path.split', (['img_path'], {}), '(img_path)\n', (2421, 2431), False, 'import os\n'), ((913, 936), 'os.path.split', 'os.path.split', (['img_path'], {}), '(img_path)\n', (926, 936), False, 'import os\n'), ((1480, 1503), 'os.path.split', 'os.path.split', (['img_path'], {}), '(img_path)\n', (1493, 1503), False, 'import os\n'), ((2026, 2049), 'os.path.split', 'os.path.split', (['img_path'], {}), '(img_path)\n', (2039, 2049), False, 'import os\n')]
|
from src.slu.datareader import domain_set, y1_set, y2_set
from preprocess.gen_embeddings_for_slu import domain2slot
import torch
import torch.nn as nn
import os
from tqdm import tqdm
import numpy as np
import logging
logger = logging.getLogger()
from src.conll2002_metrics import *
class SLUTrainer(object):
def __init__(self, params, binary_slu_tagger, slotname_predictor, sent_repre_generator=None):
self.params = params
self.binary_slu_tagger = binary_slu_tagger
self.slotname_predictor = slotname_predictor
self.lr = params.lr
self.use_label_encoder = params.tr
self.num_domain = params.num_domain
if self.use_label_encoder:
self.sent_repre_generator = sent_repre_generator
self.loss_fn_mse = nn.MSELoss()
model_parameters = [
{"params": self.binary_slu_tagger.parameters()},
{"params": self.slotname_predictor.parameters()},
{"params": self.sent_repre_generator.parameters()}
]
else:
model_parameters = [
{"params": self.binary_slu_tagger.parameters()},
{"params": self.slotname_predictor.parameters()}
]
# Adam optimizer
self.optimizer = torch.optim.Adam(model_parameters, lr=self.lr)
self.loss_fn = nn.CrossEntropyLoss()
self.early_stop = params.early_stop
self.no_improvement_num = 0
self.best_f1 = 0
self.stop_training_flag = False
def train_step(self, X, lengths, y_bin, y_final, y_dm, templates=None, tem_lengths=None, epoch=None):
# print(X)
# print(lengths)
# print(y_bin)
# print(y_final)
# print(y_dm)
# print('-'*20)
self.binary_slu_tagger.train()
self.slotname_predictor.train()
if self.use_label_encoder:
self.sent_repre_generator.train()
bin_preds, lstm_hiddens = self.binary_slu_tagger(X, lengths)
# print(y_bin)
# y_bin_ = [i for i in y_bin]
# mx_len = max(lengths)
# # print(mx_len)
# for i in range(len(y_bin_)):
# while len(y_bin_[i]) < mx_len.item():
# y_bin_[i].append(0)
# y_bin_ = torch.tensor(y_bin_,device='cuda:0')
# print(y_bin_)
# print(bin_preds.size())
# loss_func = nn.CrossEntropyLoss(reduction='mean')
# t1 = bin_preds.view(-1, 3)
# t2 = y_bin_.view(-1)
# loss = loss_func(t1, t2)
# print(loss)
## optimize binary_slu_tagger
loss_bin = self.binary_slu_tagger.crf_loss(bin_preds, lengths, y_bin)
self.optimizer.zero_grad()
loss_bin.backward(retain_graph=True)
# self.optimizer.step()
# print(loss_bin)
## optimize slotname_predictor
pred_slotname_list, gold_slotname_list = self.slotname_predictor(y_dm, lstm_hiddens, binary_golds=y_bin, final_golds=y_final)
# for i in pred_slotname_list:
# print(i)
# print('-'*20)
# for i in gold_slotname_list:
# print(i)
# print('-'*20)
# print('-'*20)
# print(pred_slotname_list)
# print('-'*30)
# print(gold_slotname_list)
# return 1,0
# '''
# loss_slotname = torch.tensor(0)
# loss_slotname = loss_slotname.cuda()
with torch.autograd.set_detect_anomaly(True):
for pred_slotname_each_sample, gold_slotname_each_sample in zip(pred_slotname_list, gold_slotname_list):
assert pred_slotname_each_sample.size()[0] == gold_slotname_each_sample.size()[0]
# loss_slotname = loss_slotname + self.loss_fn(pred_slotname_each_sample, gold_slotname_each_sample.cuda())
loss_slotname = self.loss_fn(pred_slotname_each_sample, gold_slotname_each_sample.cuda())
# self.optimizer.zero_grad()
loss_slotname.backward(retain_graph=True)
# self.optimizer.step()
# loss = loss_bin + loss_slotname
# self.optimizer.zero_grad()
# # loss_slotname = loss_temp
# loss.backward()
# print(temp)
# self.optimizer.zero_grad()
# loss_slotname = temp
# self.optimizer.step()
if self.use_label_encoder:
templates_repre, input_repre = self.sent_repre_generator(templates, tem_lengths, lstm_hiddens, lengths)
input_repre = input_repre.detach()
template0_loss = self.loss_fn_mse(templates_repre[:, 0, :], input_repre)
template1_loss = -1 * self.loss_fn_mse(templates_repre[:, 1, :], input_repre)
template2_loss = -1 * self.loss_fn_mse(templates_repre[:, 2, :], input_repre)
input_repre.requires_grad = True
# self.optimizer.zero_grad()
template0_loss.backward(retain_graph=True)
template1_loss.backward(retain_graph=True)
template2_loss.backward(retain_graph=True)
# self.optimizer.step()
if epoch > 3:
templates_repre = templates_repre.detach()
input_loss0 = self.loss_fn_mse(input_repre, templates_repre[:, 0, :])
input_loss1 = -1 * self.loss_fn_mse(input_repre, templates_repre[:, 1, :])
input_loss2 = -1 * self.loss_fn_mse(input_repre, templates_repre[:, 2, :])
templates_repre.requires_grad = True
# self.optimizer.zero_grad()
input_loss0.backward(retain_graph=True)
input_loss1.backward(retain_graph=True)
input_loss2.backward(retain_graph=True)
self.optimizer.step()
if self.use_label_encoder:
return loss_bin.item(), loss_slotname.item(), template0_loss.item(), template1_loss.item()
else:
self.optimizer.step()
return loss_bin.item(), loss_slotname.item()
# '''
    def evaluate(self, dataloader, istestset=False):
        """Evaluate the binary tagger and slot-name predictor on a dataloader.

        Returns (bin_f1, final_f1, stop_training_flag). When istestset is
        False (dev set) it also drives early stopping and best-model saving.
        """
        self.binary_slu_tagger.eval()
        self.slotname_predictor.eval()
        binary_preds, binary_golds = [], []
        final_preds, final_golds = [], []
        pbar = tqdm(enumerate(dataloader), total=len(dataloader))
        for i, (X, lengths, y_bin, y_final, y_dm) in pbar:
            binary_golds.extend(y_bin)
            final_golds.extend(y_final)
            X, lengths = X.cuda(), lengths.cuda()
            bin_preds_batch, lstm_hiddens = self.binary_slu_tagger(X, lengths)
            bin_preds_batch = self.binary_slu_tagger.crf_decode(bin_preds_batch, lengths)
            binary_preds.extend(bin_preds_batch)
            # Slot names are predicted from the *predicted* binary spans
            # (golds are withheld), matching inference conditions.
            slotname_preds_batch = self.slotname_predictor(y_dm, lstm_hiddens, binary_preditions=bin_preds_batch, binary_golds=None, final_golds=None)
            final_preds_batch = self.combine_binary_and_slotname_preds(y_dm, bin_preds_batch, slotname_preds_batch)
            final_preds.extend(final_preds_batch)
        # binary predictions
        binary_preds = np.concatenate(binary_preds, axis=0)
        binary_preds = list(binary_preds)
        binary_golds = np.concatenate(binary_golds, axis=0)
        binary_golds = list(binary_golds)
        # final predictions
        final_preds = np.concatenate(final_preds, axis=0)
        final_preds = list(final_preds)
        final_golds = np.concatenate(final_golds, axis=0)
        final_golds = list(final_golds)
        bin_lines, final_lines = [], []
        for bin_pred, bin_gold, final_pred, final_gold in zip(binary_preds, binary_golds, final_preds, final_golds):
            bin_slot_pred = y1_set[bin_pred]
            bin_slot_gold = y1_set[bin_gold]
            final_slot_pred = y2_set[final_pred]
            final_slot_gold = y2_set[final_gold]
            # conll2002_measure expects "token pred gold" lines; the token
            # itself does not affect F1, so a dummy "w" is emitted.
            bin_lines.append("w" + " " + bin_slot_pred + " " + bin_slot_gold)
            final_lines.append("w" + " " + final_slot_pred + " " + final_slot_gold)
        bin_result = conll2002_measure(bin_lines)
        bin_f1 = bin_result["fb1"]
        final_result = conll2002_measure(final_lines)
        final_f1 = final_result["fb1"]
        if istestset == False:  # dev set: early stopping on final (slot-level) F1
            if final_f1 > self.best_f1:
                self.best_f1 = final_f1
                self.no_improvement_num = 0
                logger.info("Found better model!!")
                self.save_model()
            else:
                self.no_improvement_num += 1
                logger.info("No better model found (%d/%d)" % (self.no_improvement_num, self.early_stop))
            if self.no_improvement_num >= self.early_stop:
                self.stop_training_flag = True
        return bin_f1, final_f1, self.stop_training_flag
    def combine_binary_and_slotname_preds(self, dm_id_batch, binary_preds_batch, slotname_preds_batch):
        """
        Merge BIO span predictions with per-span slot-name predictions into
        final label ids over y2_set.
        Input:
            dm_id_batch: (bsz)
            binary_preds: (bsz, seq_len)
            slotname_preds: (bsz, num_slotname, slot_num)
        Output:
            final_preds: (bsz, seq_len)
        """
        final_preds = []
        for i in range(len(dm_id_batch)):
            dm_id = dm_id_batch[i]
            binary_preds = binary_preds_batch[i]
            slotname_preds = slotname_preds_batch[i]
            # Candidate slot names are restricted to the sample's domain.
            slot_list_based_dm = domain2slot[domain_set[dm_id]]
            # NOTE: `i` is deliberately reused below as a cursor over the
            # predicted B-spans; slotname_preds has one entry per B-token.
            i = -1
            final_preds_each = []
            for bin_pred in binary_preds:
                # values of bin_pred are 0 (O), or 1(B) or 2(I)
                if bin_pred.item() == 0:
                    final_preds_each.append(0)
                elif bin_pred.item() == 1:
                    i += 1
                    pred_slot_id = torch.argmax(slotname_preds[i])
                    slotname = "B-" + slot_list_based_dm[pred_slot_id]
                    final_preds_each.append(y2_set.index(slotname))
                elif bin_pred.item() == 2:
                    if i == -1:
                        # An I-tag with no preceding B-tag: fall back to O.
                        final_preds_each.append(0)
                    else:
                        pred_slot_id = torch.argmax(slotname_preds[i])
                        slotname = "I-" + slot_list_based_dm[pred_slot_id]
                        if slotname not in y2_set:
                            # The I-variant of this slot is not a valid label
                            # (slot never spans multiple tokens): fall back to O.
                            final_preds_each.append(0)
                        else:
                            final_preds_each.append(y2_set.index(slotname))
            assert len(final_preds_each) == len(binary_preds)
            final_preds.append(final_preds_each)
        return final_preds
    def save_model(self):
        """
        save the best model

        Persists both sub-modules to <dump_path>/best_model.pth.
        """
        saved_path = os.path.join(self.params.dump_path, "best_model.pth")
        # Whole modules (not just state_dicts) are saved, so reloading does
        # not require re-instantiating the architectures.
        torch.save({
            "binary_slu_tagger": self.binary_slu_tagger,
            "slotname_predictor": self.slotname_predictor
        }, saved_path)
        logger.info("Best model has been saved to %s" % saved_path)
|
[
"torch.nn.MSELoss",
"torch.argmax",
"torch.nn.CrossEntropyLoss",
"logging.getLogger",
"torch.save",
"src.slu.datareader.y2_set.index",
"torch.optim.Adam",
"torch.autograd.set_detect_anomaly",
"os.path.join",
"numpy.concatenate"
] |
[((228, 247), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (245, 247), False, 'import logging\n'), ((1284, 1330), 'torch.optim.Adam', 'torch.optim.Adam', (['model_parameters'], {'lr': 'self.lr'}), '(model_parameters, lr=self.lr)\n', (1300, 1330), False, 'import torch\n'), ((1363, 1384), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1382, 1384), True, 'import torch.nn as nn\n'), ((7420, 7456), 'numpy.concatenate', 'np.concatenate', (['binary_preds'], {'axis': '(0)'}), '(binary_preds, axis=0)\n', (7434, 7456), True, 'import numpy as np\n'), ((7522, 7558), 'numpy.concatenate', 'np.concatenate', (['binary_golds'], {'axis': '(0)'}), '(binary_golds, axis=0)\n', (7536, 7558), True, 'import numpy as np\n'), ((7652, 7687), 'numpy.concatenate', 'np.concatenate', (['final_preds'], {'axis': '(0)'}), '(final_preds, axis=0)\n', (7666, 7687), True, 'import numpy as np\n'), ((7750, 7785), 'numpy.concatenate', 'np.concatenate', (['final_golds'], {'axis': '(0)'}), '(final_golds, axis=0)\n', (7764, 7785), True, 'import numpy as np\n'), ((11087, 11140), 'os.path.join', 'os.path.join', (['self.params.dump_path', '"""best_model.pth"""'], {}), "(self.params.dump_path, 'best_model.pth')\n", (11099, 11140), False, 'import os\n'), ((11149, 11269), 'torch.save', 'torch.save', (["{'binary_slu_tagger': self.binary_slu_tagger, 'slotname_predictor': self.\n slotname_predictor}", 'saved_path'], {}), "({'binary_slu_tagger': self.binary_slu_tagger,\n 'slotname_predictor': self.slotname_predictor}, saved_path)\n", (11159, 11269), False, 'import torch\n'), ((785, 797), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (795, 797), True, 'import torch.nn as nn\n'), ((3533, 3572), 'torch.autograd.set_detect_anomaly', 'torch.autograd.set_detect_anomaly', (['(True)'], {}), '(True)\n', (3566, 3572), False, 'import torch\n'), ((10146, 10177), 'torch.argmax', 'torch.argmax', (['slotname_preds[i]'], {}), '(slotname_preds[i])\n', (10158, 10177), False, 'import 
torch\n'), ((10293, 10315), 'src.slu.datareader.y2_set.index', 'y2_set.index', (['slotname'], {}), '(slotname)\n', (10305, 10315), False, 'from src.slu.datareader import domain_set, y1_set, y2_set\n'), ((10508, 10539), 'torch.argmax', 'torch.argmax', (['slotname_preds[i]'], {}), '(slotname_preds[i])\n', (10520, 10539), False, 'import torch\n'), ((10803, 10825), 'src.slu.datareader.y2_set.index', 'y2_set.index', (['slotname'], {}), '(slotname)\n', (10815, 10825), False, 'from src.slu.datareader import domain_set, y1_set, y2_set\n')]
|
import random
import numpy as np
import tensorflow as tf
class Reproducibility:
    """
    Singleton guaranteeing reproducible experiment runs.

    The server constructs it once with a main seed (``Reproducibility(seed)``);
    clients then obtain per-node seeds via
    ``Reproducibility.get_instance().set_seed(ID)``.
    Note: reproducibility is only guaranteed on CPU — many GPU ops
    (e.g. convolutions) are non-deterministic.

    # Arguments:
        seed: the main seed for the server node

    # Properties:
        seed: the server seed
        seeds: mapping of node id -> seed for every node seen so far
    """
    __instance = None

    @staticmethod
    def get_instance():
        """Return the singleton, creating an unseeded one on first access."""
        if Reproducibility.__instance is None:
            Reproducibility()
        return Reproducibility.__instance

    def __init__(self, seed=None):
        """
        Virtually private constructor; only one instance may ever exist.
        """
        if Reproducibility.__instance is not None:
            raise Exception("This class is a singleton")
        self.__seed = seed
        self.__seeds = {'server': self.__seed}
        Reproducibility.__instance = self
        if self.__seed is not None:
            self.set_seed('server')

    def set_seed(self, id):
        """
        Seed numpy, random and tensorflow for the given node.

        # Arguments:
            id: 'server' on the server node, the client ID otherwise
        """
        if id not in self.__seeds:
            # First time this node asks: derive a fresh sub-seed for it.
            self.__seeds[id] = np.random.randint(2**32-1)
        node_seed = self.__seeds[id]
        np.random.seed(node_seed)
        random.seed(node_seed)
        tf.random.set_seed(node_seed)

    @property
    def seed(self):
        return self.__seed

    @property
    def seeds(self):
        return self.__seeds

    def delete_instance(self):
        """
        Remove the singleton instance. Not recommended for normal use; this
        method exists for tests.
        """
        if Reproducibility.__instance is None:
            return
        del self.__seed
        del self.__seeds
        Reproducibility.__instance = None
|
[
"tensorflow.random.set_seed",
"random.seed",
"numpy.random.randint",
"numpy.random.seed"
] |
[((1806, 1838), 'numpy.random.seed', 'np.random.seed', (['self.__seeds[id]'], {}), '(self.__seeds[id])\n', (1820, 1838), True, 'import numpy as np\n'), ((1847, 1876), 'random.seed', 'random.seed', (['self.__seeds[id]'], {}), '(self.__seeds[id])\n', (1858, 1876), False, 'import random\n'), ((1885, 1921), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['self.__seeds[id]'], {}), '(self.__seeds[id])\n', (1903, 1921), True, 'import tensorflow as tf\n'), ((1771, 1801), 'numpy.random.randint', 'np.random.randint', (['(2 ** 32 - 1)'], {}), '(2 ** 32 - 1)\n', (1788, 1801), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
#
# (c) Copyright 2016 Hewlett Packard Enterprise Development LP
# (c) Copyright 2017 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import unittest
from cephlm.tests.cephmetrics.system.test_data import HPssaCliData
from cephlm.cephmetrics.system.hpssacli import HPssaCli
from cephlm.common.exceptions import CephLMException
class TestHPssaCli(unittest.TestCase):
    """Tests for HPssaCli metric collection built on swiftlm's hpssacli."""

    @mock.patch('swiftlm.hp_hardware.hpssacli.main')
    def test_check_hpssacli_success(self, mock_hpssacli):
        """check_hpssacli() yields ceph-storage metrics from swiftlm output."""
        mock_hpssacli.return_value = HPssaCliData.MOCK_RESPONSE
        result = HPssaCli.check_hpssacli()
        for entry in result:
            self.assertEqual(entry.dimensions['service'], 'ceph-storage')
            self.assertTrue(entry.name.startswith('cephlm.hpssacli'))

    @mock.patch('swiftlm.hp_hardware.hpssacli.main')
    def test_check_hpssacli_failure(self, mock_hpssacli):
        """An unexpected swiftlm error is wrapped in CephLMException."""
        mock_hpssacli.side_effect = Exception("Unknown error")
        regexp = "Unknown exception occured when " \
                 "executing swiftlm hpssacli module"
        # assertRaisesRegexp is a deprecated alias removed in Python 3.12;
        # assertRaisesRegex is the supported spelling.
        self.assertRaisesRegex(CephLMException, regexp,
                               lambda: HPssaCli.check_hpssacli())
|
[
"mock.patch",
"cephlm.cephmetrics.system.hpssacli.HPssaCli.check_hpssacli"
] |
[((912, 959), 'mock.patch', 'mock.patch', (['"""swiftlm.hp_hardware.hpssacli.main"""'], {}), "('swiftlm.hp_hardware.hpssacli.main')\n", (922, 959), False, 'import mock\n'), ((1304, 1351), 'mock.patch', 'mock.patch', (['"""swiftlm.hp_hardware.hpssacli.main"""'], {}), "('swiftlm.hp_hardware.hpssacli.main')\n", (1314, 1351), False, 'import mock\n'), ((1099, 1124), 'cephlm.cephmetrics.system.hpssacli.HPssaCli.check_hpssacli', 'HPssaCli.check_hpssacli', ([], {}), '()\n', (1122, 1124), False, 'from cephlm.cephmetrics.system.hpssacli import HPssaCli\n'), ((1676, 1701), 'cephlm.cephmetrics.system.hpssacli.HPssaCli.check_hpssacli', 'HPssaCli.check_hpssacli', ([], {}), '()\n', (1699, 1701), False, 'from cephlm.cephmetrics.system.hpssacli import HPssaCli\n')]
|
"""Common utilities for alerts and alert digests."""
import os
from math import floor, log10
from typing import List, Optional, Union
from jinja2 import Environment, FileSystemLoader, select_autoescape
from chaos_genius.alerts.email import send_static_alert_email
from chaos_genius.core.utils.round import round_number
from chaos_genius.settings import CHAOSGENIUS_WEBAPP_URL
class AlertException(Exception):
    """A general exception in a specific alert.

    Stores and prints alert ID and KPI ID.
    """

    def __init__(self, message: str, alert_id: int, kpi_id: Optional[int] = None):
        """Initialize a new alert exception.

        Args:
            message: exception message.
            alert_id: ID of alert where this originated from.
            kpi_id: ID of KPI associated with the alert.
        """
        # A falsy kpi_id (None or 0) is treated as "no KPI", matching callers.
        ids = f"(KPI: {kpi_id}, Alert: {alert_id})" if kpi_id else f"(Alert: {alert_id})"
        super().__init__(f"{ids} {message}")
def webapp_url_prefix():
    """Return the webapp URL prefix, always ending in a slash.

    If CHAOSGENIUS_WEBAPP_URL is not configured, an invalid URL carrying an
    explanatory message is returned instead.

    TODO: redirect to docs link showing how to setup instead of invalid URL.
    """
    if not CHAOSGENIUS_WEBAPP_URL:
        return (
            "Webapp URL not setup. Please setup CHAOSGENIUS_WEBAPP_URL "
            "in the environment file./"
        )
    if CHAOSGENIUS_WEBAPP_URL.endswith("/"):
        return CHAOSGENIUS_WEBAPP_URL
    return CHAOSGENIUS_WEBAPP_URL + "/"
def change_message_from_percent(percent_change: Union[str, int, float]) -> str:
    """Build a human-readable change message from a percentage change.

    percent_change is:
    - a string ("–", "+inf", ...) when no meaningful percentage exists,
      in which case it is returned verbatim;
    - 0 for no change;
    - a positive number for an increase, a negative one for a decrease.
    """
    if isinstance(percent_change, str):
        return percent_change
    if percent_change > 0:
        return f"Increased by ({percent_change}%)"
    if percent_change < 0:
        return f"Decreased by ({percent_change}%)"
    return "No change (–)"
def find_percentage_change(
    curr_val: Union[int, float], prev_val: Optional[Union[int, float]]
) -> Union[int, float, str]:
    """Calculate the percentage change between a previous and current value.

    Returns "–" when no previous point exists or both values are zero,
    a signed "inf" string when only the previous value is zero, and a
    rounded percentage otherwise.
    """
    if prev_val is None:
        # previous point wasn't found
        return "–"
    if prev_val == 0:
        if curr_val == 0:
            return "–"
        # previous value is 0, but current value isn't
        return ("+" if curr_val > 0 else "-") + "inf"
    return round_number((curr_val - prev_val) / prev_val * 100)
def send_email_using_template(
    template_name: str,
    recipient_emails: List[str],
    subject: str,
    files: List[dict],
    **kwargs,
) -> None:
    """Render a Jinja2 template and send it as a static alert email.

    Templates are looked up in the ``email_templates`` directory next to
    this module; **kwargs are passed through to the template renderer.
    """
    templates_dir = os.path.join(os.path.dirname(__file__), "email_templates")
    env = Environment(
        loader=FileSystemLoader(templates_dir),
        autoescape=select_autoescape(["html", "xml"]),
    )
    body = env.get_template(template_name).render(**kwargs)
    send_static_alert_email(recipient_emails, subject, body, files)
# SI-style prefixes keyed by power of ten; covers 1e-9 .. 1e12.
HRN_PREFIXES = {
    -9: "n",
    -6: "µ",
    -3: "m",
    0: "",
    3: "K",
    6: "M",
    9: "B",
    12: "T",
}


def _get_exponent(num: float) -> int:
    """Returns the power of 10 to which the number is raised to."""
    if num == 0:
        return 0
    return floor(log10(abs(num)))


def human_readable(num: float) -> str:
    """Returns the human readable format of a number.

    E.g. 1234 -> "1.234K", 1500000 -> "1.5M".
    """
    exponent = _get_exponent(num)
    # Clamp to the range covered by HRN_PREFIXES (-9 .. 12). Previously only
    # the upper bound was clamped, so |num| < 1e-9 raised a KeyError.
    new_exponent = min(max(3 * floor(exponent / 3), -9), 12)
    precision = 10 ** new_exponent
    new_val = round(num / precision, 3)
    return str(new_val) + HRN_PREFIXES[new_exponent]
|
[
"os.path.dirname",
"math.floor",
"jinja2.select_autoescape",
"jinja2.FileSystemLoader",
"chaos_genius.core.utils.round.round_number"
] |
[((3179, 3204), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3194, 3204), False, 'import os\n'), ((3263, 3285), 'jinja2.FileSystemLoader', 'FileSystemLoader', (['path'], {}), '(path)\n', (3279, 3285), False, 'from jinja2 import Environment, FileSystemLoader, select_autoescape\n'), ((3298, 3332), 'jinja2.select_autoescape', 'select_autoescape', (["['html', 'xml']"], {}), "(['html', 'xml'])\n", (3315, 3332), False, 'from jinja2 import Environment, FileSystemLoader, select_autoescape\n'), ((3934, 3953), 'math.floor', 'floor', (['(exponent / 3)'], {}), '(exponent / 3)\n', (3939, 3953), False, 'from math import floor, log10\n'), ((2924, 2955), 'chaos_genius.core.utils.round.round_number', 'round_number', (['percentage_change'], {}), '(percentage_change)\n', (2936, 2955), False, 'from chaos_genius.core.utils.round import round_number\n')]
|
from typing import Any
from pydantic import validator, create_model, AnyHttpUrl
from pydantic.main import BaseModel
from tracardi.domain.entity import Entity
class AwsIamAuth(BaseModel):
    """AWS IAM credential pair used to authenticate API calls."""
    aws_access_key_id: str
    aws_secret_access_key: str
class Content(BaseModel):
    """Message payload: the text content and its declared type."""
    content: str
    type: str

    @validator('content')
    def must_have_2_letters(cls, v):
        # Reject content shorter than two characters.
        if len(v) < 2:
            raise ValueError('String is too short. String must be at least two letters long.')
        return v
class AwsSqsConfiguration(BaseModel):
    """Configuration for delivering a message to an AWS SQS queue."""
    source: Entity  # presumably references the stored AWS credentials resource — TODO confirm
    message: Content
    region_name: str
    queue_url: AnyHttpUrl
    delay_seconds: int = 0  # delivery delay in seconds; 0 = immediate
    message_attributes: str
class MessageAttribute:
    """A single SQS message attribute in boto3 wire format.

    Numeric values (int/float, excluding bool) become DataType "Number";
    everything else is stringified and sent as DataType "String".
    """

    def __init__(self, value):
        # bool is a subclass of int, so exclude it explicitly: True/False
        # must serialize as strings. The original code had two identical
        # branches (bool/str and the fallback); they are collapsed here.
        if isinstance(value, (int, float)) and not isinstance(value, bool):
            self.type = "Number"
        else:
            self.type = "String"
        self.key = "StringValue"
        self.value = str(value)

    def dict(self):
        """Return the attribute as a {"DataType": ..., "StringValue": ...} dict."""
        return {
            "DataType": self.type,
            self.key: self.value
        }
class MessageAttributes:
    """Validated collection of SQS message attributes.

    Accepts a flat mapping of scalar values; nested containers are rejected.
    """

    def __init__(self, values: dict):
        self._value = {}
        for name, raw in values.items():
            if isinstance(raw, (dict, list)):
                raise ValueError("Attributes must be key value pairs. Allowed values are strings and "
                                 "numbers")
            self._value[name] = MessageAttribute(raw)

    def dict(self):
        """Return all attributes in boto3 MessageAttributes wire format."""
        return {name: attr.dict() for name, attr in self._value.items()}
|
[
"pydantic.validator"
] |
[((313, 333), 'pydantic.validator', 'validator', (['"""content"""'], {}), "('content')\n", (322, 333), False, 'from pydantic import validator, create_model, AnyHttpUrl\n')]
|
from rest_framework import permissions
from rest_framework.generics import ListAPIView, CreateAPIView, RetrieveUpdateDestroyAPIView
from rest_framework.exceptions import ValidationError
from inventory.api.serializers import AssetSerializer, BorrowerSerializer, CategorySerializer
from inventory.models import Asset, Borrower, Category
from inventory.permissions import IsOwnerOrReadOnly
class AssetRetrieveUpdateDestroy(RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete a single Asset, looked up by its ``uid``.

    Writes are restricted to the asset's owner via IsOwnerOrReadOnly.
    """
    lookup_field = 'uid'
    serializer_class = AssetSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly]

    def get_queryset(self):
        # return Asset.objects.filter(owner=self.request.user)
        # NOTE(review): returns all assets (object-level permission still
        # guards writes); the commented line suggests owner-filtering was
        # considered — confirm the intended visibility.
        return Asset.objects.all()
class BorrowerRetrieveUpdateDestroy(RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete a single Borrower, looked up by ``id``."""
    lookup_field = 'id'
    serializer_class = BorrowerSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]

    def get_queryset(self):
        # No per-user filtering here, unlike BorrowerList/BorrowerCreate —
        # TODO confirm all borrowers should be reachable by id.
        return Borrower.objects.all()
class AssetList(ListAPIView):
    """List the assets owned by the requesting user."""
    serializer_class = AssetSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]

    def get_queryset(self):
        """
        This view returns a list of all the assets owned by the currently authenticated user.
        """
        return Asset.objects.filter(owner=self.request.user)
class CategoryList(ListAPIView):
    """List all categories (not scoped to the requesting user)."""
    serializer_class = CategorySerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]
    queryset = Category.objects.all()
class BorrowerList(ListAPIView):
    """List the borrowers associated with the requesting user."""
    serializer_class = BorrowerSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]

    def get_queryset(self):
        return Borrower.objects.filter(associated_user=self.request.user)
class AssetCreate(CreateAPIView):
    """Create a new Asset owned by the requesting user."""
    serializer_class = AssetSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]

    def perform_create(self, serializer):
        # Stamp the authenticated user as owner; clients cannot set it.
        serializer.save(owner=self.request.user)

    def create(self, request, *args, **kwargs):
        # Pre-validate the asset name before delegating to DRF's create().
        try:
            name = request.data.get('name')
            if name is not None and len(name) < 1:
                raise ValidationError({'name': 'Must be at least one character in length.'})
        except ValueError:
            # NOTE(review): it is unclear which call above can raise
            # ValueError — confirm this branch is reachable.
            raise ValidationError({'name': 'Valid characters only.'})
        return super().create(request, *args, **kwargs)

    def get_queryset(self):
        return Asset.objects.filter(owner=self.request.user)
class BorrowerCreate(CreateAPIView):
    """Create a new Borrower associated with the requesting user."""
    serializer_class = BorrowerSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]

    def get_queryset(self):
        return Borrower.objects.filter(associated_user=self.request.user)
class CategoryCreate(CreateAPIView):
    """Create a new Category (categories are global, not per-user)."""
    serializer_class = CategorySerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]
    queryset = Category.objects.all()
|
[
"inventory.models.Asset.objects.filter",
"inventory.models.Borrower.objects.filter",
"inventory.models.Category.objects.all",
"inventory.models.Borrower.objects.all",
"inventory.models.Asset.objects.all",
"rest_framework.exceptions.ValidationError"
] |
[((1492, 1514), 'inventory.models.Category.objects.all', 'Category.objects.all', ([], {}), '()\n', (1512, 1514), False, 'from inventory.models import Asset, Borrower, Category\n'), ((2891, 2913), 'inventory.models.Category.objects.all', 'Category.objects.all', ([], {}), '()\n', (2911, 2913), False, 'from inventory.models import Asset, Borrower, Category\n'), ((707, 726), 'inventory.models.Asset.objects.all', 'Asset.objects.all', ([], {}), '()\n', (724, 726), False, 'from inventory.models import Asset, Borrower, Category\n'), ((970, 992), 'inventory.models.Borrower.objects.all', 'Borrower.objects.all', ([], {}), '()\n', (990, 992), False, 'from inventory.models import Asset, Borrower, Category\n'), ((1290, 1335), 'inventory.models.Asset.objects.filter', 'Asset.objects.filter', ([], {'owner': 'self.request.user'}), '(owner=self.request.user)\n', (1310, 1335), False, 'from inventory.models import Asset, Borrower, Category\n'), ((1700, 1758), 'inventory.models.Borrower.objects.filter', 'Borrower.objects.filter', ([], {'associated_user': 'self.request.user'}), '(associated_user=self.request.user)\n', (1723, 1758), False, 'from inventory.models import Asset, Borrower, Category\n'), ((2437, 2482), 'inventory.models.Asset.objects.filter', 'Asset.objects.filter', ([], {'owner': 'self.request.user'}), '(owner=self.request.user)\n', (2457, 2482), False, 'from inventory.models import Asset, Borrower, Category\n'), ((2672, 2730), 'inventory.models.Borrower.objects.filter', 'Borrower.objects.filter', ([], {'associated_user': 'self.request.user'}), '(associated_user=self.request.user)\n', (2695, 2730), False, 'from inventory.models import Asset, Borrower, Category\n'), ((2169, 2239), 'rest_framework.exceptions.ValidationError', 'ValidationError', (["{'name': 'Must be at least one character in length.'}"], {}), "({'name': 'Must be at least one character in length.'})\n", (2184, 2239), False, 'from rest_framework.exceptions import ValidationError\n'), ((2285, 2336), 
'rest_framework.exceptions.ValidationError', 'ValidationError', (["{'name': 'Valid characters only.'}"], {}), "({'name': 'Valid characters only.'})\n", (2300, 2336), False, 'from rest_framework.exceptions import ValidationError\n')]
|
"""
author: thomaszdxsn
"""
import re
__all__ = ('RE_DATETIME', 'RE_IMG_SRC', 'RE_DATE', 'RE_UNIT_NUM', 'RE_CHINESE')
# "YYYY-MM-DD HH:MM:SS"-style timestamps (2- or 4-digit year accepted).
RE_DATETIME = re.compile(r'\d{2,4}-\d{1,2}-\d{1,2} \d{1,2}:\d{1,2}:\d{1,2}')
# "YYYY-MM-DD"-style dates.
RE_DATE = re.compile(r'\d{2,4}-\d{1,2}-\d{1,2}')
# src attribute of an <img> tag; group 1 captures the URL.
RE_IMG_SRC = re.compile(r'<img\s+src=[\'\"](.*?)[\'\"].*?/?>', flags=re.DOTALL|re.MULTILINE)
# A number, optionally followed by the Chinese unit 万 (ten thousand).
RE_UNIT_NUM = re.compile(r'([\d.]+)(万)?')
# One or more consecutive Chinese (CJK Unified Ideograph) characters.
RE_CHINESE = re.compile(r'([\u4e00-\u9fa5]+)')
|
[
"re.compile"
] |
[((135, 202), 're.compile', 're.compile', (['"""\\\\d{2,4}-\\\\d{1,2}-\\\\d{1,2} \\\\d{1,2}:\\\\d{1,2}:\\\\d{1,2}"""'], {}), "('\\\\d{2,4}-\\\\d{1,2}-\\\\d{1,2} \\\\d{1,2}:\\\\d{1,2}:\\\\d{1,2}')\n", (145, 202), False, 'import re\n'), ((208, 248), 're.compile', 're.compile', (['"""\\\\d{2,4}-\\\\d{1,2}-\\\\d{1,2}"""'], {}), "('\\\\d{2,4}-\\\\d{1,2}-\\\\d{1,2}')\n", (218, 248), False, 'import re\n'), ((260, 351), 're.compile', 're.compile', (['"""<img\\\\s+src=[\\\\\'\\\\"](.*?)[\\\\\'\\\\"].*?/?>"""'], {'flags': '(re.DOTALL | re.MULTILINE)'}), '(\'<img\\\\s+src=[\\\\\\\'\\\\"](.*?)[\\\\\\\'\\\\"].*?/?>\', flags=re.DOTALL |\n re.MULTILINE)\n', (270, 351), False, 'import re\n'), ((354, 381), 're.compile', 're.compile', (['"""([\\\\d.]+)(万)?"""'], {}), "('([\\\\d.]+)(万)?')\n", (364, 381), False, 'import re\n'), ((395, 429), 're.compile', 're.compile', (['"""([\\\\u4e00-\\\\u9fa5]+)"""'], {}), "('([\\\\u4e00-\\\\u9fa5]+)')\n", (405, 429), False, 'import re\n')]
|
# File : GetUniqueId.py
# Subprogram to generate a unique id for Pubnub message
# Author: <NAME>
import uuid
def GenerateId():
    """Return a fresh UUID1 as a string, used as a unique Pubnub message id."""
    return str(uuid.uuid1())
|
[
"uuid.uuid1"
] |
[((140, 152), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (150, 152), False, 'import uuid\n')]
|